Add verif results
author    Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
          Thu, 18 Jun 2009 20:59:35 +0000 (16:59 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
          Thu, 18 Jun 2009 20:59:35 +0000 (16:59 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
225 files changed:
formal-model/urcu-controldataflow-alpha-ipi-compress/.input.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_reader.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/asserts.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/asserts.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/asserts.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/asserts.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/asserts.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/asserts.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.log [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/.input.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/.input.spin.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/.input.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/.input.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/DEFINES [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/Makefile [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/asserts.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/asserts.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/references.txt [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu.sh [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu.spin [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_nested.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.spin.input.trail [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress.ltl [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.log [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input [new file with mode: 0644]
formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input.trail [new file with mode: 0644]

diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/.input.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/.input.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/.input.spin b/formal-model/urcu-controldataflow-alpha-ipi-compress/.input.spin
new file mode 100644 (file)
index 0000000..887b1ad
--- /dev/null
@@ -0,0 +1,1340 @@
+#define READER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
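+
+/*
+ * Minimal usage sketch (TOKEN_A and TOKEN_B are hypothetical names, not part
+ * of this model): an instruction B that reads a value produced by instruction
+ * A is typically guarded as
+ *
+ *     :: CONSUME_TOKENS(state, TOKEN_A, TOKEN_B) ->
+ *             ... execute B ...
+ *             PRODUCE_TOKENS(state, TOKEN_B);
+ *
+ * B may only fire once A's token is present and B's own token has not been
+ * produced yet, which encodes the RAW dependency while still letting
+ * independent instructions interleave.
+ */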
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read from and then write to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but the dependency remains when writing
+ * multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
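+
+/*
+ * As a concrete illustration (see PROCEDURE_READ_LOCK below): the write to
+ * urcu_active_readers[] has a RAW dependency on the read of urcu_gp_ctr whose
+ * value it stores, a WAR dependency on the earlier read of
+ * urcu_active_readers[], and a control dependency on the branch testing
+ * RCU_GP_CTR_NEST_MASK; the tokens consumed by that write mirror exactly
+ * these dependencies.
+ */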
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. See
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
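+
+/*
+ * A minimal sketch of how these macros compose, for a hypothetical cached
+ * variable x (not part of this model):
+ *
+ *     DECLARE_CACHED_VAR(byte, x);        global memory copy (mem_x)
+ *     DECLARE_PROC_CACHED_VAR(byte, x);   per-process copy + dirty bit
+ *     INIT_CACHED_VAR(x, 0);
+ *     INIT_PROC_CACHED_VAR(x, 0);
+ *     WRITE_CACHED_VAR(x, 1);             writes the local copy, marks it dirty
+ *     CACHE_WRITE_TO_MEM(x, get_pid());   commits the dirty copy to mem_x
+ *     CACHE_READ_FROM_MEM(x, get_pid());  refreshes a clean local copy from mem_x
+ *
+ * The RANDOM_* variants above let the model non-deterministically delay these
+ * commits and refreshes, which is what produces the out-of-order memory
+ * behaviour being verified.
+ */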
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
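+
+/*
+ * PROCEDURE_READ_LOCK above encodes, token by token, roughly this sequential
+ * pseudo-code (a sketch, not the exact interleaved model):
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;   outermost nesting
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;       nested
+ *
+ * with each read, branch and write guarded by its own token so the model can
+ * explore their out-of-order execution.
+ */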
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
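+
+/*
+ * Similarly, PROCEDURE_READ_UNLOCK above encodes, roughly:
+ *
+ *     tmp = urcu_active_readers[id];
+ *     urcu_active_readers[id] = tmp - 1;
+ *
+ * split into two token-guarded steps so the read/write ordering is explicit.
+ */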
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
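+
+/*
+ * Resulting one-hot bit layout for the reader flow state (bits 0..29):
+ *
+ *     0       READ_PROD_NONE
+ *     1-5     read lock             (out = bit 5)
+ *     6       first mb
+ *     7-11    nested read lock      (out = bit 11)
+ *     12-13   read gen / access gen
+ *     14-15   nested read unlock    (out = bit 15)
+ *     16      second mb
+ *     17-18   read unlock           (out = bit 18)
+ *     19-23   unrolled read lock    (out = bit 23)
+ *     24      third mb
+ *     25-26   unrolled read gen / access gen
+ *     27      fourth mb
+ *     28-29   unrolled read unlock  (out = bit 29)
+ */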
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch to skip it
+                        * in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could believe the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
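+
+/*
+ * Editorial note: after the initial WRITE_PROD_NONE start token, the bits
+ * above follow the writer's steps in order: write the new data, issue the
+ * wmb, exchange rcu_ptr, first full mb, first counter flip and wait, second
+ * flip and wait, second full mb, then poison (free) the old data.
+ */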
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
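+/* Must clear all tokens, including the wait-loop branch bits that are left
+ * out of WRITE_PROC_ALL_TOKENS (editorial note, mirroring the reader-side
+ * comment). */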
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
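+                       /* Editorial note: the first branch above reads as
+                        * "reader 0 is inside a read-side critical section and
+                        * its snapshot parity differs from cur_gp_val", in which
+                        * case the writer keeps looping (FIRST_WAIT_LOOP);
+                        * otherwise the first wait completes. */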
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, under weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/DEFINES b/formal-model/urcu-controldataflow-alpha-ipi-compress/DEFINES
new file mode 100644 (file)
index 0000000..2681f69
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
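+// Editorial note: with RCU_GP_CTR_BIT = 1 << 7 (128), the nest mask is 0x7f,
+// so bits 0-6 of urcu_active_readers count the nesting depth and bit 7 holds
+// the grace-period parity.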
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/Makefile b/formal-model/urcu-controldataflow-alpha-ipi-compress/Makefile
new file mode 100644 (file)
index 0000000..f8bfd31
--- /dev/null
@@ -0,0 +1,171 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64 -DCOLLAPSE
+#CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       #make urcu_free | tee urcu_free.log
+       #make urcu_free_no_mb | tee urcu_free_no_mb.log
+       #make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       #make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       #make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       #make asserts | tee asserts.log
+       make summary
+
+#show trail : spin -v -t -N pan.ltl input.spin
+# after each individual make.
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
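+# Editorial example: if urcu_free.ltl holds the property "[] !read_poison",
+# the spin -f line above appends a never claim for "!([] !read_poison)" to
+# pan.ltl, which pan then checks for acceptance cycles.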
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
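+# Editorial note on the pan flags used above: -a searches for acceptance
+# cycles of the generated never claim, -f adds weak fairness to that search,
+# -c1 stops at the first error, -m10000000 bounds the search depth, and -w20
+# sizes the state hash table at 2^20 slots.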
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/references.txt b/formal-model/urcu-controldataflow-alpha-ipi-compress/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu.sh b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
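+# Editorial note: -DSAFETY builds pan for safety properties only (no never
+# claim); -c1 stops at the first error, -m10000000 bounds the search depth,
+# and -w21 sizes the state hash table at 2^21 slots.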
+./pan -v -c1 -X -m10000000 -w21
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu.spin b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu.spin
new file mode 100644 (file)
index 0000000..8075506
--- /dev/null
@@ -0,0 +1,1321 @@
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits are used as triggers to execute the instructions
+ * having those variables as input. Bits left active inhibit instruction
+ * execution. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
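+
+/*
+ * Illustrative sketch (editorial addition; TOKEN_IN and TOKEN_OUT are
+ * placeholder names, not tokens of this model): a statement guarded as
+ *     :: CONSUME_TOKENS(proc_urcu_reader, TOKEN_IN, TOKEN_OUT) ->
+ *             ...
+ *             PRODUCE_TOKENS(proc_urcu_reader, TOKEN_OUT);
+ * may only run once every TOKEN_IN bit is set and its own TOKEN_OUT bit is
+ * still clear, which expresses the dependency and prevents re-execution.
+ */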
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as
+ * if there were a core-synchronizing instruction between loops. To see the
+ * effect of loop unrolling, loops must be unrolled manually. Note that if
+ * loops end or start with a core-synchronizing instruction, the model is
+ * appropriate. Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. Cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May randomly flush a dirty cache entry to memory, or refresh a clean entry
+ * from memory, or do nothing.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
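+
+/*
+ * Editorial example of the cache model above: WRITE_CACHED_VAR(rcu_ptr, 1)
+ * only updates the writer's cached_rcu_ptr and marks it dirty; the value
+ * reaches mem_rcu_ptr through CACHE_WRITE_TO_MEM (via smp_wmb/smp_mb or a
+ * RANDOM_CACHE_WRITE_TO_MEM in ooo_mem), and a reader only sees it once its
+ * own CACHE_READ_FROM_MEM refreshes cached_rcu_ptr from memory.
+ */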
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
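+
+/*
+ * Editorial walk-through of a hypothetical exchange: smp_mb_send(i, j, 1) from
+ * the writer executes a local smp_mb(), raises reader_barrier[0] and spins on
+ * it; the reader's smp_mb_recv() either executes smp_mb() and clears the flag,
+ * or breaks out and ignores the request. The PROGRESS_LABEL on the wait loop
+ * keeps that spinning from being reported as a non-progress cycle.
+ */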
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
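+
+/*
+ * Editorial summary: the macro above models rcu_read_lock(). It reads the
+ * per-reader nesting counter; when the nest mask is clear it stores a snapshot
+ * of urcu_gp_ctr into urcu_active_readers, otherwise it stores the counter
+ * incremented by one. Each step is sequenced by the token bits passed in.
+ */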
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
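+
+/*
+ * Editorial summary: PROCEDURE_READ_UNLOCK above models rcu_read_unlock(). It
+ * reads the per-reader nesting counter and writes back the value minus one,
+ * again sequenced by the token bits.
+ */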
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
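+
+/*
+ * A rough sketch of the token mechanism driving the guards below. The actual
+ * macro definitions appear earlier in the model; the exact form shown here is
+ * an assumption for illustration:
+ *
+ *	#define PRODUCE_TOKENS(state, tokens)	state = state | (tokens)
+ *	#define CONSUME_TOKENS(state, deps, out) \
+ *		(((state) & (deps)) == (deps) && !((state) & (out)))
+ *	#define CLEAR_TOKENS(state, tokens)	state = state & ~(tokens)
+ *
+ * A guard may thus fire as soon as all of its dependency tokens ("deps") have
+ * been produced and its own output tokens ("out") have not yet been produced,
+ * which is how out-of-order scheduling of the instructions is modeled.
+ */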
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier only executes at points
+                * where the resulting execution order is consistent with
+                * program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because adding a branch to skip it in the common case would
+                        * cost more than the barrier itself.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution into the other.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
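+
+/*
+ * For reference, a rough C sketch of the update-side sequence being modeled
+ * below (illustrative only; names are assumptions, not the exact liburcu
+ * source):
+ *
+ *	new = ...;                                // WRITE_DATA
+ *	smp_wmb();                                // WRITE_PROC_WMB
+ *	old = rcu_xchg_pointer(&rcu_ptr, new);    // WRITE_XCHG_PTR
+ *	smp_mb();                                 // WRITE_PROC_FIRST_MB
+ *	switch_parity();                          // first flip
+ *	wait_for_readers();                       // first wait
+ *	switch_parity();                          // second flip
+ *	wait_for_readers();                       // second wait
+ *	smp_mb();                                 // WRITE_PROC_SECOND_MB
+ *	free(old);                                // WRITE_FREE, modeled as
+ *	                                          // poisoning rcu_data[old_data]
+ */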
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
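+
+                       /* The wait condition above corresponds roughly to the
+                        * following C helper (a sketch; the name and exact
+                        * form are assumptions):
+                        *
+                        *	static inline int rcu_old_gp_ongoing(long v)
+                        *	{
+                        *		return (v & RCU_GP_CTR_NEST_MASK) &&
+                        *			((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+                        *	}
+                        *
+                        * i.e. the reader is inside a read-side critical
+                        * section AND was registered under the other parity.
+                        */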
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Since the validation
+                * checks whether the data entry that was read is poisoned, it
+                * is OK not to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free.ltl b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_nested.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress.ltl b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_reader.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_reader.log b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_reader.log
new file mode 100644 (file)
index 0000000..f11dc83
--- /dev/null
@@ -0,0 +1,303 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi-compress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_reader.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+depth 7: Claim reached state 9 (line 1367)
+depth 136: Claim reached state 9 (line 1366)
+Depth=  115766 States=    1e+06 Transitions= 2.97e+08 Memory=   495.647        t=    461 R=   2e+03
+Depth=  115766 States=    2e+06 Transitions= 5.97e+08 Memory=   525.529        t=    939 R=   2e+03
+Depth=  115766 States=    3e+06 Transitions= 8.86e+08 Memory=   553.264        t= 1.41e+03 R=   2e+03
+pan: resizing hashtable to -w22..  done
+Depth=  115766 States=    4e+06 Transitions= 1.18e+09 Memory=   614.463        t= 1.87e+03 R=   2e+03
+Depth=  115766 States=    5e+06 Transitions= 1.48e+09 Memory=   644.541        t= 2.34e+03 R=   2e+03
+Depth=  115766 States=    6e+06 Transitions= 1.77e+09 Memory=   674.424        t= 2.8e+03 R=   2e+03
+Depth=  115766 States=    7e+06 Transitions= 2.07e+09 Memory=   701.865        t= 3.27e+03 R=   2e+03
+Depth=  115766 States=    8e+06 Transitions= 2.36e+09 Memory=   729.404        t= 3.75e+03 R=   2e+03
+Depth=  115766 States=    9e+06 Transitions= 2.66e+09 Memory=   757.920        t= 4.23e+03 R=   2e+03
+pan: resizing hashtable to -w24..  done
+Depth=  117893 States=    1e+07 Transitions= 2.95e+09 Memory=   909.846        t= 4.69e+03 R=   2e+03
+Depth=  117893 States=  1.1e+07 Transitions= 3.35e+09 Memory=   937.190        t= 5.31e+03 R=   2e+03
+Depth=  117893 States=  1.2e+07 Transitions= 3.69e+09 Memory=   964.826        t= 5.85e+03 R=   2e+03
+Depth=  117893 States=  1.3e+07 Transitions= 4.02e+09 Memory=   994.026        t= 6.37e+03 R=   2e+03
+Depth=  117893 States=  1.4e+07 Transitions= 4.32e+09 Memory=  1021.858        t= 6.84e+03 R=   2e+03
+Depth=  117893 States=  1.5e+07 Transitions= 4.63e+09 Memory=  1050.080        t= 7.33e+03 R=   2e+03
+Depth=  117893 States=  1.6e+07 Transitions= 4.95e+09 Memory=  1078.693        t= 7.83e+03 R=   2e+03
+Depth=  117893 States=  1.7e+07 Transitions= 5.24e+09 Memory=  1106.135        t= 8.28e+03 R=   2e+03
+Depth=  117893 States=  1.8e+07 Transitions= 5.54e+09 Memory=  1135.920        t= 8.74e+03 R=   2e+03
+Depth=  117893 States=  1.9e+07 Transitions= 5.82e+09 Memory=  1164.338        t= 9.19e+03 R=   2e+03
+Depth=  117893 States=    2e+07 Transitions= 6.12e+09 Memory=  1194.514        t= 9.66e+03 R=   2e+03
+Depth=  117893 States=  2.1e+07 Transitions= 6.42e+09 Memory=  1223.713        t= 1.01e+04 R=   2e+03
+Depth=  117893 States=  2.2e+07 Transitions= 6.72e+09 Memory=  1252.912        t= 1.06e+04 R=   2e+03
+Depth=  117893 States=  2.3e+07 Transitions= 7.01e+09 Memory=  1280.744        t= 1.11e+04 R=   2e+03
+Depth=  117893 States=  2.4e+07 Transitions=  7.3e+09 Memory=  1311.408        t= 1.15e+04 R=   2e+03
+Depth=  117893 States=  2.5e+07 Transitions= 7.61e+09 Memory=  1336.994        t= 1.2e+04 R=   2e+03
+Depth=  117893 States=  2.6e+07 Transitions= 7.89e+09 Memory=  1365.315        t= 1.24e+04 R=   2e+03
+Depth=  117893 States=  2.7e+07 Transitions= 8.35e+09 Memory=  1390.315        t= 1.32e+04 R=   2e+03
+Depth=  117893 States=  2.8e+07 Transitions= 8.84e+09 Memory=  1414.143        t= 1.4e+04 R=   2e+03
+Depth=  117893 States=  2.9e+07 Transitions= 9.64e+09 Memory=  1438.166        t= 1.53e+04 R=   2e+03
+Depth=  117893 States=    3e+07 Transitions= 1.02e+10 Memory=  1462.971        t= 1.62e+04 R=   2e+03
+Depth=  117893 States=  3.1e+07 Transitions= 1.06e+10 Memory=  1488.068        t= 1.69e+04 R=   2e+03
+Depth=  117893 States=  3.2e+07 Transitions= 1.14e+10 Memory=  1512.287        t= 1.82e+04 R=   2e+03
+Depth=  117893 States=  3.3e+07 Transitions= 1.24e+10 Memory=  1536.799        t= 1.99e+04 R=   2e+03
+Depth=  117893 States=  3.4e+07 Transitions= 1.27e+10 Memory=  1567.658        t= 2.04e+04 R=   2e+03
+pan: resizing hashtable to -w26..  done
+Depth=  117893 States=  3.5e+07 Transitions= 1.32e+10 Memory=  2089.424        t= 2.13e+04 R=   2e+03
+Depth=  117893 States=  3.6e+07 Transitions= 1.38e+10 Memory=  2116.768        t= 2.22e+04 R=   2e+03
+Depth=  117893 States=  3.7e+07 Transitions= 1.45e+10 Memory=  2142.158        t= 2.33e+04 R=   2e+03
+Depth=  117893 States=  3.8e+07 Transitions= 1.49e+10 Memory=  2170.674        t= 2.41e+04 R=   2e+03
+Depth=  117893 States=  3.9e+07 Transitions= 1.55e+10 Memory=  2197.725        t= 2.49e+04 R=   2e+03
+Depth=  117893 States=    4e+07 Transitions= 1.63e+10 Memory=  2219.014        t= 2.63e+04 R=   2e+03
+Depth=  117893 States=  4.1e+07 Transitions= 1.69e+10 Memory=  2242.061        t= 2.75e+04 R=   1e+03
+Depth=  117893 States=  4.2e+07 Transitions= 1.73e+10 Memory=  2269.600        t= 2.81e+04 R=   1e+03
+Depth=  117893 States=  4.3e+07 Transitions=  1.8e+10 Memory=  2295.967        t= 2.92e+04 R=   1e+03
+Depth=  117893 States=  4.4e+07 Transitions= 1.85e+10 Memory=  2323.213        t=  3e+04 R=   1e+03
+Depth=  117893 States=  4.5e+07 Transitions= 1.91e+10 Memory=  2348.994        t= 3.09e+04 R=   1e+03
+Depth=  117893 States=  4.6e+07 Transitions= 1.97e+10 Memory=  2374.190        t= 3.2e+04 R=   1e+03
+Depth=  117893 States=  4.7e+07 Transitions= 2.03e+10 Memory=  2399.483        t= 3.3e+04 R=   1e+03
+Depth=  117893 States=  4.8e+07 Transitions= 2.07e+10 Memory=  2428.486        t= 3.36e+04 R=   1e+03
+Depth=  117893 States=  4.9e+07 Transitions= 2.13e+10 Memory=  2454.951        t= 3.45e+04 R=   1e+03
+Depth=  117893 States=    5e+07 Transitions= 2.18e+10 Memory=  2482.686        t= 3.54e+04 R=   1e+03
+Depth=  117893 States=  5.1e+07 Transitions= 2.24e+10 Memory=  2509.151        t= 3.62e+04 R=   1e+03
+Depth=  117893 States=  5.2e+07 Transitions=  2.3e+10 Memory=  2535.420        t= 3.72e+04 R=   1e+03
+Depth=  117893 States=  5.3e+07 Transitions= 2.38e+10 Memory=  2560.518        t= 3.86e+04 R=   1e+03
+Depth=  117893 States=  5.4e+07 Transitions= 2.42e+10 Memory=  2586.592        t= 3.94e+04 R=   1e+03
+Depth=  117893 States=  5.5e+07 Transitions= 2.46e+10 Memory=  2610.713        t=  4e+04 R=   1e+03
+Depth=  117893 States=  5.6e+07 Transitions= 2.49e+10 Memory=  2638.545        t= 4.06e+04 R=   1e+03
+Depth=  117893 States=  5.7e+07 Transitions= 2.53e+10 Memory=  2668.135        t= 4.11e+04 R=   1e+03
+Depth=  117893 States=  5.8e+07 Transitions= 2.56e+10 Memory=  2698.213        t= 4.16e+04 R=   1e+03
+Depth=  117893 States=  5.9e+07 Transitions= 2.59e+10 Memory=  2723.897        t= 4.22e+04 R=   1e+03
+Depth=  117893 States=    6e+07 Transitions= 2.66e+10 Memory=  2747.627        t= 4.33e+04 R=   1e+03
+Depth=  117893 States=  6.1e+07 Transitions=  2.7e+10 Memory=  2774.678        t= 4.39e+04 R=   1e+03
+Depth=  117893 States=  6.2e+07 Transitions= 2.73e+10 Memory=  2803.584        t= 4.45e+04 R=   1e+03
+Depth=  117893 States=  6.3e+07 Transitions= 2.77e+10 Memory=  2831.221        t= 4.51e+04 R=   1e+03
+Depth=  117893 States=  6.4e+07 Transitions=  2.8e+10 Memory=  2860.225        t= 4.56e+04 R=   1e+03
+Depth=  117893 States=  6.5e+07 Transitions= 2.84e+10 Memory=  2885.225        t= 4.62e+04 R=   1e+03
+Depth=  117893 States=  6.6e+07 Transitions= 2.89e+10 Memory=  2908.760        t= 4.71e+04 R=   1e+03
+Depth=  117893 States=  6.7e+07 Transitions= 2.96e+10 Memory=  2932.686        t= 4.82e+04 R=   1e+03
+Depth=  117893 States=  6.8e+07 Transitions= 3.03e+10 Memory=  2956.123        t= 4.96e+04 R=   1e+03
+Depth=  117893 States=  6.9e+07 Transitions= 3.14e+10 Memory=  2985.908        t= 5.13e+04 R=   1e+03
+Depth=  117893 States=    7e+07 Transitions= 3.24e+10 Memory=  3015.303        t= 5.31e+04 R=   1e+03
+Depth=  117893 States=  7.1e+07 Transitions= 3.36e+10 Memory=  3039.912        t= 5.5e+04 R=   1e+03
+Depth=  117893 States=  7.2e+07 Transitions= 3.47e+10 Memory=  3065.401        t= 5.69e+04 R=   1e+03
+Depth=  117893 States=  7.3e+07 Transitions= 3.58e+10 Memory=  3086.690        t= 5.87e+04 R=   1e+03
+Depth=  117893 States=  7.4e+07 Transitions= 3.67e+10 Memory=  3108.272        t= 6.02e+04 R=   1e+03
+Depth=  117893 States=  7.5e+07 Transitions= 3.78e+10 Memory=  3127.608        t= 6.21e+04 R=   1e+03
+Depth=  117893 States=  7.6e+07 Transitions= 3.89e+10 Memory=  3147.627        t= 6.39e+04 R=   1e+03
+Depth=  117893 States=  7.7e+07 Transitions= 3.97e+10 Memory=  3167.647        t= 6.53e+04 R=   1e+03
+Depth=  117893 States=  7.8e+07 Transitions= 4.01e+10 Memory=  3195.772        t= 6.6e+04 R=   1e+03
+Depth=  117893 States=  7.9e+07 Transitions= 4.07e+10 Memory=  3221.065        t= 6.69e+04 R=   1e+03
+Depth=  117893 States=    8e+07 Transitions= 4.14e+10 Memory=  3245.869        t= 6.8e+04 R=   1e+03
+Depth=  117893 States=  8.1e+07 Transitions= 4.26e+10 Memory=  3270.186        t=  7e+04 R=   1e+03
+Depth=  117893 States=  8.2e+07 Transitions= 4.33e+10 Memory=  3293.135        t= 7.12e+04 R=   1e+03
+Depth=  117893 States=  8.3e+07 Transitions= 4.38e+10 Memory=  3323.701        t= 7.21e+04 R=   1e+03
+Depth=  117893 States=  8.4e+07 Transitions= 4.41e+10 Memory=  3350.752        t= 7.26e+04 R=   1e+03
+Depth=  117893 States=  8.5e+07 Transitions= 4.45e+10 Memory=  3375.166        t= 7.31e+04 R=   1e+03
+Depth=  117893 States=  8.6e+07 Transitions= 4.49e+10 Memory=  3397.432        t= 7.37e+04 R=   1e+03
+Depth=  117893 States=  8.7e+07 Transitions= 4.53e+10 Memory=  3424.287        t= 7.44e+04 R=   1e+03
+Depth=  117893 States=  8.8e+07 Transitions= 4.61e+10 Memory=  3449.385        t= 7.58e+04 R=   1e+03
+Depth=  117893 States=  8.9e+07 Transitions= 4.71e+10 Memory=  3469.795        t= 7.74e+04 R=   1e+03
+Depth=  117893 States=    9e+07 Transitions= 4.75e+10 Memory=  3498.018        t= 7.81e+04 R=   1e+03
+Depth=  117893 States=  9.1e+07 Transitions=  4.8e+10 Memory=  3524.678        t= 7.88e+04 R=   1e+03
+Depth=  117893 States=  9.2e+07 Transitions= 4.88e+10 Memory=  3549.776        t= 8.02e+04 R=   1e+03
+Depth=  117893 States=  9.3e+07 Transitions= 4.97e+10 Memory=  3573.897        t= 8.16e+04 R=   1e+03
+Depth=  117893 States=  9.4e+07 Transitions= 5.03e+10 Memory=  3598.311        t= 8.26e+04 R=   1e+03
+Depth=  117893 States=  9.5e+07 Transitions= 5.12e+10 Memory=  3621.358        t= 8.41e+04 R=   1e+03
+Depth=  117893 States=  9.6e+07 Transitions= 5.19e+10 Memory=  3645.381        t= 8.52e+04 R=   1e+03
+Depth=  117893 States=  9.7e+07 Transitions= 5.26e+10 Memory=  3669.502        t= 8.64e+04 R=   1e+03
+Depth=  117893 States=  9.8e+07 Transitions= 5.32e+10 Memory=  3693.233        t= 8.75e+04 R=   1e+03
+Depth=  117893 States=  9.9e+07 Transitions= 5.36e+10 Memory=  3723.115        t= 8.8e+04 R=   1e+03
+Depth=  117893 States=    1e+08 Transitions= 5.41e+10 Memory=  3748.799        t= 8.89e+04 R=   1e+03
+Depth=  117893 States= 1.01e+08 Transitions= 5.51e+10 Memory=  3771.943        t= 9.05e+04 R=   1e+03
+Depth=  117893 States= 1.02e+08 Transitions= 5.59e+10 Memory=  3796.651        t= 9.18e+04 R=   1e+03
+Depth=  117893 States= 1.03e+08 Transitions= 5.64e+10 Memory=  3820.967        t= 9.28e+04 R=   1e+03
+Depth=  117893 States= 1.04e+08 Transitions= 5.71e+10 Memory=  3845.283        t= 9.39e+04 R=   1e+03
+Depth=  117893 States= 1.05e+08 Transitions= 5.76e+10 Memory=  3872.041        t= 9.46e+04 R=   1e+03
+Depth=  117893 States= 1.06e+08 Transitions=  5.8e+10 Memory=  3901.436        t= 9.54e+04 R=   1e+03
+Depth=  117893 States= 1.07e+08 Transitions= 5.85e+10 Memory=  3931.026        t= 9.61e+04 R=   1e+03
+Depth=  117893 States= 1.08e+08 Transitions= 5.89e+10 Memory=  3956.318        t= 9.68e+04 R=   1e+03
+Depth=  117893 States= 1.09e+08 Transitions= 5.96e+10 Memory=  3978.486        t= 9.79e+04 R=   1e+03
+Depth=  117893 States=  1.1e+08 Transitions= 6.02e+10 Memory=  4002.901        t= 9.89e+04 R=   1e+03
+Depth=  117893 States= 1.11e+08 Transitions= 6.06e+10 Memory=  4027.803        t= 9.96e+04 R=   1e+03
+Depth=  117893 States= 1.12e+08 Transitions= 6.12e+10 Memory=  4051.924        t= 1.01e+05 R=   1e+03
+Depth=  117893 States= 1.13e+08 Transitions= 6.16e+10 Memory=  4079.365        t= 1.01e+05 R=   1e+03
+Depth=  117893 States= 1.14e+08 Transitions= 6.22e+10 Memory=  4104.756        t= 1.02e+05 R=   1e+03
+Depth=  117893 States= 1.15e+08 Transitions= 6.28e+10 Memory=  4129.268        t= 1.03e+05 R=   1e+03
+Depth=  117893 States= 1.16e+08 Transitions= 6.38e+10 Memory=  4155.537        t= 1.05e+05 R=   1e+03
+Depth=  117893 States= 1.17e+08 Transitions= 6.44e+10 Memory=  4184.443        t= 1.06e+05 R=   1e+03
+Depth=  117893 States= 1.18e+08 Transitions= 6.48e+10 Memory=  4211.397        t= 1.07e+05 R=   1e+03
+Depth=  117893 States= 1.19e+08 Transitions= 6.51e+10 Memory=  4234.248        t= 1.07e+05 R=   1e+03
+Depth=  117893 States=  1.2e+08 Transitions= 6.58e+10 Memory=  4262.471        t= 1.08e+05 R=   1e+03
+Depth=  117893 States= 1.21e+08 Transitions= 6.65e+10 Memory=  4289.912        t= 1.17e+05 R=   1e+03
+Depth=  117893 States= 1.22e+08 Transitions= 6.69e+10 Memory=  4315.791        t= 1.17e+05 R=   1e+03
+Depth=  117893 States= 1.23e+08 Transitions= 6.74e+10 Memory=  4343.330        t= 1.18e+05 R=   1e+03
+Depth=  117893 States= 1.24e+08 Transitions= 6.83e+10 Memory=  4368.623        t= 1.19e+05 R=   1e+03
+Depth=  117893 States= 1.25e+08 Transitions= 6.86e+10 Memory=  4395.674        t= 1.2e+05 R=   1e+03
+Depth=  117893 States= 1.26e+08 Transitions=  6.9e+10 Memory=  4419.990        t= 1.21e+05 R=   1e+03
+Depth=  117893 States= 1.27e+08 Transitions= 6.94e+10 Memory=  4448.115        t= 1.21e+05 R=   1e+03
+Depth=  117893 States= 1.28e+08 Transitions= 7.02e+10 Memory=  4473.994        t= 1.23e+05 R=   1e+03
+Depth=  117893 States= 1.29e+08 Transitions= 7.05e+10 Memory=  4500.947        t= 1.23e+05 R=   1e+03
+Depth=  117893 States=  1.3e+08 Transitions= 7.09e+10 Memory=  4526.826        t= 1.24e+05 R=   1e+03
+Depth=  117893 States= 1.31e+08 Transitions= 7.18e+10 Memory=  4553.291        t= 1.25e+05 R=   1e+03
+Depth=  117893 States= 1.32e+08 Transitions= 7.21e+10 Memory=  4581.221        t= 1.26e+05 R=   1e+03
+Depth=  117893 States= 1.33e+08 Transitions= 7.25e+10 Memory=  4605.830        t= 1.26e+05 R=   1e+03
+Depth=  117893 States= 1.34e+08 Transitions=  7.3e+10 Memory=  4632.197        t= 1.27e+05 R=   1e+03
+Depth=  117893 States= 1.35e+08 Transitions= 7.35e+10 Memory=  4657.588        t= 1.33e+05 R=   1e+03
+pan: resizing hashtable to -w28..  done
+Depth=  117893 States= 1.36e+08 Transitions= 7.38e+10 Memory=  6705.588        t= 1.33e+05 R=   1e+03
+Depth=  117893 States= 1.37e+08 Transitions= 7.46e+10 Memory=  6705.588        t= 1.35e+05 R=   1e+03
+Depth=  117893 States= 1.38e+08 Transitions= 7.54e+10 Memory=  6720.627        t= 1.36e+05 R=   1e+03
+Depth=  117893 States= 1.39e+08 Transitions= 7.57e+10 Memory=  6750.217        t= 1.37e+05 R=   1e+03
+Depth=  117893 States=  1.4e+08 Transitions= 7.61e+10 Memory=  6775.803        t= 1.37e+05 R=   1e+03
+Depth=  117893 States= 1.41e+08 Transitions= 7.65e+10 Memory=  6797.190        t= 1.38e+05 R=   1e+03
+Depth=  117893 States= 1.42e+08 Transitions= 7.71e+10 Memory=  6823.068        t= 1.39e+05 R=   1e+03
+Depth=  117893 States= 1.43e+08 Transitions= 7.76e+10 Memory=  6851.682        t= 1.4e+05 R=   1e+03
+Depth=  117893 States= 1.44e+08 Transitions= 7.83e+10 Memory=  6879.611        t= 1.41e+05 R=   1e+03
+Depth=  117893 States= 1.45e+08 Transitions= 7.87e+10 Memory=  6905.100        t= 1.41e+05 R=   1e+03
+Depth=  117893 States= 1.46e+08 Transitions= 7.91e+10 Memory=  6929.318        t= 1.42e+05 R=   1e+03
+Depth=  117893 States= 1.47e+08 Transitions= 7.96e+10 Memory=  6954.709        t= 1.43e+05 R=   1e+03
+Depth=  117893 States= 1.48e+08 Transitions=    8e+10 Memory=  6983.225        t= 1.44e+05 R=   1e+03
+Depth=  117893 States= 1.49e+08 Transitions= 8.05e+10 Memory=  7013.791        t= 1.44e+05 R=   1e+03
+Depth=  117893 States=  1.5e+08 Transitions= 8.15e+10 Memory=  7037.033        t= 1.46e+05 R=   1e+03
+Depth=  117893 States= 1.51e+08 Transitions= 8.19e+10 Memory=  7068.381        t= 1.47e+05 R=   1e+03
+Depth=  117893 States= 1.52e+08 Transitions= 8.22e+10 Memory=  7093.576        t= 1.47e+05 R=   1e+03
+Depth=  117893 States= 1.53e+08 Transitions= 8.26e+10 Memory=  7115.842        t= 1.48e+05 R=   1e+03
+Depth=  117893 States= 1.54e+08 Transitions=  8.3e+10 Memory=  7144.358        t= 1.48e+05 R=   1e+03
+Depth=  117893 States= 1.55e+08 Transitions= 8.39e+10 Memory=  7170.432        t= 1.5e+05 R=   1e+03
+Depth=  117893 States= 1.56e+08 Transitions= 8.42e+10 Memory=  7197.483        t= 1.5e+05 R=   1e+03
+Depth=  117893 States= 1.57e+08 Transitions= 8.46e+10 Memory=  7220.725        t= 1.51e+05 R=   1e+03
+Depth=  117893 States= 1.58e+08 Transitions= 8.51e+10 Memory=  7246.701        t= 1.52e+05 R=   1e+03
+Depth=  117893 States= 1.59e+08 Transitions= 8.55e+10 Memory=  7273.752        t= 1.52e+05 R=   1e+03
+Depth=  117893 States=  1.6e+08 Transitions= 8.58e+10 Memory=  7302.951        t= 1.53e+05 R=   1e+03
+Depth=  117893 States= 1.61e+08 Transitions= 8.64e+10 Memory=  7327.365        t= 1.54e+05 R=   1e+03
+Depth=  117893 States= 1.62e+08 Transitions= 8.69e+10 Memory=  7353.049        t= 1.55e+05 R=   1e+03
+Depth=  117893 States= 1.63e+08 Transitions= 8.73e+10 Memory=  7379.318        t= 1.55e+05 R=   1e+03
+Depth=  117893 States= 1.64e+08 Transitions= 8.77e+10 Memory=  7404.904        t= 1.56e+05 R=   1e+03
+Depth=  117893 States= 1.65e+08 Transitions= 8.82e+10 Memory=  7429.514        t= 1.57e+05 R=   1e+03
+Depth=  117893 States= 1.66e+08 Transitions= 8.87e+10 Memory=  7453.733        t= 1.58e+05 R=   1e+03
+Depth=  117893 States= 1.67e+08 Transitions= 8.95e+10 Memory=  7477.561        t= 1.59e+05 R=   1e+03
+Depth=  117893 States= 1.68e+08 Transitions= 9.02e+10 Memory=  7501.096        t= 1.6e+05 R=   1e+03
+Depth=  117893 States= 1.69e+08 Transitions= 9.07e+10 Memory=  7527.365        t= 1.61e+05 R=   1e+03
+Depth=  117893 States=  1.7e+08 Transitions= 9.11e+10 Memory=  7553.830        t= 1.61e+05 R=   1e+03
+Depth=  117893 States= 1.71e+08 Transitions= 9.16e+10 Memory=  7577.951        t= 1.62e+05 R=   1e+03
+Depth=  117893 States= 1.72e+08 Transitions= 9.24e+10 Memory=  7602.658        t= 1.63e+05 R=   1e+03
+Depth=  117893 States= 1.73e+08 Transitions= 9.35e+10 Memory=  7626.096        t= 1.65e+05 R=   1e+03
+Depth=  117893 States= 1.74e+08 Transitions= 9.39e+10 Memory=  7656.369        t= 1.66e+05 R=   1e+03
+Depth=  117893 States= 1.75e+08 Transitions= 9.43e+10 Memory=  7682.834        t= 1.67e+05 R=   1e+03
+Depth=  117893 States= 1.76e+08 Transitions= 9.49e+10 Memory=  7709.201        t= 1.67e+05 R=   1e+03
+Depth=  117893 States= 1.77e+08 Transitions= 9.55e+10 Memory=  7736.447        t= 1.68e+05 R=   1e+03
+Depth=  117893 States= 1.78e+08 Transitions= 9.61e+10 Memory=  7763.791        t= 1.69e+05 R=   1e+03
+Depth=  117893 States= 1.79e+08 Transitions= 9.66e+10 Memory=  7790.061        t= 1.7e+05 R=   1e+03
+Depth=  117893 States=  1.8e+08 Transitions= 9.72e+10 Memory=  7817.600        t= 1.71e+05 R=   1e+03
+Depth=  117893 States= 1.81e+08 Transitions=  9.8e+10 Memory=  7837.912        t= 1.73e+05 R=   1e+03
+Depth=  117893 States= 1.82e+08 Transitions= 9.88e+10 Memory=  7858.127        t= 1.74e+05 R=   1e+03
+Depth=  117893 States= 1.83e+08 Transitions= 9.95e+10 Memory=  7886.154        t= 1.75e+05 R=   1e+03
+Depth=  117893 States= 1.84e+08 Transitions=    1e+11 Memory=  7912.522        t= 1.76e+05 R=   1e+03
+Depth=  117893 States= 1.85e+08 Transitions= 1.01e+11 Memory=  7937.717        t= 1.77e+05 R=   1e+03
+Depth=  117893 States= 1.86e+08 Transitions= 1.02e+11 Memory=  7962.815        t= 1.78e+05 R=   1e+03
+Depth=  117893 States= 1.87e+08 Transitions= 1.02e+11 Memory=  7988.010        t= 1.79e+05 R=   1e+03
+Depth=  117893 States= 1.88e+08 Transitions= 1.03e+11 Memory=  8014.768        t= 1.8e+05 R=   1e+03
+Depth=  117893 States= 1.89e+08 Transitions= 1.03e+11 Memory=  8040.061        t= 1.81e+05 R=   1e+03
+Depth=  117893 States=  1.9e+08 Transitions= 1.04e+11 Memory=  8066.818        t= 1.82e+05 R=   1e+03
+Depth=  117893 States= 1.91e+08 Transitions= 1.05e+11 Memory=  8090.451        t= 1.83e+05 R=   1e+03
+Depth=  117893 States= 1.92e+08 Transitions= 1.05e+11 Memory=  8116.330        t= 1.84e+05 R=   1e+03
+Depth=  117893 States= 1.93e+08 Transitions= 1.06e+11 Memory=  8144.748        t= 1.85e+05 R=   1e+03
+Depth=  117893 States= 1.94e+08 Transitions= 1.07e+11 Memory=  8170.822        t= 1.86e+05 R=   1e+03
+Depth=  117893 States= 1.95e+08 Transitions= 1.07e+11 Memory=  8196.604        t= 1.87e+05 R=   1e+03
+Depth=  117893 States= 1.96e+08 Transitions= 1.07e+11 Memory=  8222.873        t= 1.87e+05 R=   1e+03
+Depth=  117893 States= 1.97e+08 Transitions= 1.08e+11 Memory=  8252.658        t= 1.88e+05 R=   1e+03
+Depth=  117893 States= 1.98e+08 Transitions= 1.08e+11 Memory=  8278.147        t= 1.89e+05 R=   1e+03
+Depth=  117893 States= 1.99e+08 Transitions= 1.08e+11 Memory=  8305.490        t= 1.89e+05 R=   1e+03
+Depth=  117893 States=    2e+08 Transitions= 1.09e+11 Memory=  8332.151        t= 1.9e+05 R=   1e+03
+Depth=  117893 States= 2.01e+08 Transitions= 1.09e+11 Memory=  8355.979        t= 1.91e+05 R=   1e+03
+Depth=  117893 States= 2.02e+08 Transitions=  1.1e+11 Memory=  8383.225        t= 1.91e+05 R=   1e+03
+Depth=  117893 States= 2.03e+08 Transitions=  1.1e+11 Memory=  8410.959        t= 1.92e+05 R=   1e+03
+Depth=  117893 States= 2.04e+08 Transitions= 1.11e+11 Memory=  8437.912        t= 1.93e+05 R=   1e+03
+Depth=  117893 States= 2.05e+08 Transitions= 1.11e+11 Memory=  8465.256        t= 1.94e+05 R=   1e+03
+Depth=  117893 States= 2.06e+08 Transitions= 1.12e+11 Memory=  8489.670        t= 1.94e+05 R=   1e+03
+Depth=  117893 States= 2.07e+08 Transitions= 1.12e+11 Memory=  8517.600        t= 1.95e+05 R=   1e+03
+Depth=  117893 States= 2.08e+08 Transitions= 1.12e+11 Memory=  8543.088        t= 1.96e+05 R=   1e+03
+Depth=  117893 States= 2.09e+08 Transitions= 1.13e+11 Memory=  8566.233        t= 1.97e+05 R=   1e+03
+Depth=  117893 States=  2.1e+08 Transitions= 1.14e+11 Memory=  8590.744        t= 1.98e+05 R=   1e+03
+Depth=  117893 States= 2.11e+08 Transitions= 1.14e+11 Memory=  8613.693        t= 1.99e+05 R=   1e+03
+Depth=  117893 States= 2.12e+08 Transitions= 1.15e+11 Memory=  8642.502        t= 2.01e+05 R=   1e+03
+Depth=  117893 States= 2.13e+08 Transitions= 1.16e+11 Memory=  8672.873        t= 2.02e+05 R=   1e+03
+Depth=  117893 States= 2.14e+08 Transitions= 1.18e+11 Memory=  8697.483        t= 2.04e+05 R=   1e+03
+Depth=  117893 States= 2.15e+08 Transitions= 1.19e+11 Memory=  8722.190        t= 2.06e+05 R=   1e+03
+Depth=  117893 States= 2.16e+08 Transitions=  1.2e+11 Memory=  8744.455        t= 2.08e+05 R=   1e+03
+Depth=  117893 States= 2.17e+08 Transitions= 1.21e+11 Memory=  8765.842        t= 2.09e+05 R=   1e+03
+Depth=  117893 States= 2.18e+08 Transitions= 1.22e+11 Memory=  8785.861        t= 2.11e+05 R=   1e+03
+Depth=  117893 States= 2.19e+08 Transitions= 1.23e+11 Memory=  8804.904        t= 2.13e+05 R=   1e+03
+Depth=  117893 States=  2.2e+08 Transitions= 1.24e+11 Memory=  8823.850        t= 2.15e+05 R=   1e+03
+Depth=  117893 States= 2.21e+08 Transitions= 1.24e+11 Memory=  8850.705        t= 2.15e+05 R=   1e+03
+Depth=  117893 States= 2.22e+08 Transitions= 1.25e+11 Memory=  8877.365        t= 2.16e+05 R=   1e+03
+Depth=  117893 States= 2.23e+08 Transitions= 1.25e+11 Memory=  8902.268        t= 2.17e+05 R=   1e+03
+Depth=  117893 States= 2.24e+08 Transitions= 1.26e+11 Memory=  8927.268        t= 2.19e+05 R=   1e+03
+Depth=  117893 States= 2.25e+08 Transitions= 1.27e+11 Memory=  8949.143        t= 2.2e+05 R=   1e+03
+Depth=  117893 States= 2.26e+08 Transitions= 1.28e+11 Memory=  8973.459        t= 2.22e+05 R=   1e+03
+Depth=  117893 States= 2.27e+08 Transitions= 1.29e+11 Memory=  9004.807        t= 2.22e+05 R=   1e+03
+Depth=  117893 States= 2.28e+08 Transitions= 1.29e+11 Memory=  9031.174        t= 2.23e+05 R=   1e+03
+Depth=  117893 States= 2.29e+08 Transitions= 1.29e+11 Memory=  9054.709        t= 2.23e+05 R=   1e+03
+Depth=  117893 States=  2.3e+08 Transitions=  1.3e+11 Memory=  9077.756        t= 2.24e+05 R=   1e+03
+Depth=  117893 States= 2.31e+08 Transitions=  1.3e+11 Memory=  9103.830        t= 2.25e+05 R=   1e+03
+Depth=  117893 States= 2.32e+08 Transitions= 1.31e+11 Memory=  9128.830        t= 2.26e+05 R=   1e+03
+Depth=  117893 States= 2.33e+08 Transitions= 1.32e+11 Memory=  9150.315        t= 2.28e+05 R=   1e+03
+Depth=  117893 States= 2.34e+08 Transitions= 1.32e+11 Memory=  9175.705        t= 2.29e+05 R=   1e+03
+Depth=  117893 States= 2.35e+08 Transitions= 1.33e+11 Memory=  9201.096        t= 2.3e+05 R=   1e+03
+Depth=  117893 States= 2.36e+08 Transitions= 1.34e+11 Memory=  9226.682        t= 2.31e+05 R=   1e+03
+Depth=  117893 States= 2.37e+08 Transitions= 1.34e+11 Memory=  9253.049        t= 2.31e+05 R=   1e+03
+Depth=  117893 States= 2.38e+08 Transitions= 1.35e+11 Memory=  9276.291        t= 2.33e+05 R=   1e+03
+Depth=  117893 States= 2.39e+08 Transitions= 1.36e+11 Memory=  9300.608        t= 2.34e+05 R=   1e+03
+Depth=  117893 States=  2.4e+08 Transitions= 1.37e+11 Memory=  9324.240        t= 2.35e+05 R=   1e+03
+Depth=  117893 States= 2.41e+08 Transitions= 1.37e+11 Memory=  9348.361        t= 2.37e+05 R=   1e+03
+Depth=  117893 States= 2.42e+08 Transitions= 1.38e+11 Memory=  9372.873        t= 2.38e+05 R=   1e+03
+Depth=  117893 States= 2.43e+08 Transitions= 1.39e+11 Memory=  9396.604        t= 2.39e+05 R=   1e+03
+Depth=  117893 States= 2.44e+08 Transitions= 1.39e+11 Memory=  9421.994        t= 2.4e+05 R=   1e+03
+Depth=  117893 States= 2.45e+08 Transitions=  1.4e+11 Memory=  9450.998        t= 2.41e+05 R=   1e+03
+Depth=  117893 States= 2.46e+08 Transitions= 1.41e+11 Memory=  9475.217        t= 2.42e+05 R=   1e+03
+Depth=  117893 States= 2.47e+08 Transitions= 1.41e+11 Memory=  9498.264        t= 2.43e+05 R=   1e+03
+Depth=  117893 States= 2.48e+08 Transitions= 1.42e+11 Memory=  9522.580        t= 2.44e+05 R=   1e+03
+Depth=  117893 States= 2.49e+08 Transitions= 1.42e+11 Memory=  9549.924        t= 2.45e+05 R=   1e+03
+Depth=  117893 States=  2.5e+08 Transitions= 1.43e+11 Memory=  9579.807        t= 2.45e+05 R=   1e+03
+Depth=  117893 States= 2.51e+08 Transitions= 1.43e+11 Memory=  9609.104        t= 2.46e+05 R=   1e+03
+Depth=  117893 States= 2.52e+08 Transitions= 1.44e+11 Memory=  9631.858        t= 2.47e+05 R=   1e+03
+Depth=  117893 States= 2.53e+08 Transitions= 1.45e+11 Memory=  9652.756        t= 2.48e+05 R=   1e+03
+Depth=  117893 States= 2.54e+08 Transitions= 1.45e+11 Memory=  9678.733        t= 2.49e+05 R=   1e+03
+Depth=  117893 States= 2.55e+08 Transitions= 1.45e+11 Memory=  9703.733        t= 2.5e+05 R=   1e+03
+Depth=  117893 States= 2.56e+08 Transitions= 1.46e+11 Memory=  9728.440        t= 2.5e+05 R=   1e+03
+Depth=  117893 States= 2.57e+08 Transitions= 1.46e+11 Memory=  9757.834        t= 2.51e+05 R=   1e+03
+Depth=  117893 States= 2.58e+08 Transitions= 1.47e+11 Memory=  9784.006        t= 2.52e+05 R=   1e+03
+Depth=  117893 States= 2.59e+08 Transitions= 1.48e+11 Memory=  9810.959        t= 2.54e+05 R=   1e+03
+Depth=  117893 States=  2.6e+08 Transitions= 1.48e+11 Memory=  9839.475        t= 2.54e+05 R=   1e+03
+Depth=  117893 States= 2.61e+08 Transitions= 1.49e+11 Memory=  9862.717        t= 2.55e+05 R=   1e+03
+Depth=  117893 States= 2.62e+08 Transitions= 1.49e+11 Memory=  9891.428        t= 2.56e+05 R=   1e+03
+Depth=  117893 States= 2.63e+08 Transitions=  1.5e+11 Memory=  9916.623        t= 2.57e+05 R=   1e+03
+Depth=  117893 States= 2.64e+08 Transitions=  1.5e+11 Memory=  9944.162        t= 2.58e+05 R=   1e+03
+Depth=  117893 States= 2.65e+08 Transitions= 1.51e+11 Memory=  9969.358        t= 2.58e+05 R=   1e+03
+Depth=  117893 States= 2.66e+08 Transitions= 1.52e+11 Memory=  9995.432        t= 2.6e+05 R=   1e+03
+Depth=  117893 States= 2.67e+08 Transitions= 1.52e+11 Memory= 10023.654        t= 2.6e+05 R=   1e+03
+Depth=  117893 States= 2.68e+08 Transitions= 1.52e+11 Memory= 10047.580        t= 2.61e+05 R=   1e+03
+Depth=  117893 States= 2.69e+08 Transitions= 1.53e+11 Memory= 10075.022        t= 2.62e+05 R=   1e+03
+Depth=  117893 States=  2.7e+08 Transitions= 1.54e+11 Memory= 10101.779        t= 2.63e+05 R=   1e+03
+Depth=  117893 States= 2.71e+08 Transitions= 1.54e+11 Memory= 10129.221        t= 2.64e+05 R=   1e+03
+Depth=  117893 States= 2.72e+08 Transitions= 1.54e+11 Memory= 10154.416        t= 2.64e+05 R=   1e+03
+Depth=  117893 States= 2.73e+08 Transitions= 1.55e+11 Memory= 10180.393        t= 2.66e+05 R=   1e+03
+Depth=  117893 States= 2.74e+08 Transitions= 1.56e+11 Memory= 10209.104        t= 2.66e+05 R=   1e+03
+Depth=  117893 States= 2.75e+08 Transitions= 1.56e+11 Memory= 10232.541        t= 2.67e+05 R=   1e+03
+Depth=  117893 States= 2.76e+08 Transitions= 1.57e+11 Memory= 10259.690        t= 2.68e+05 R=   1e+03
+Depth=  117893 States= 2.77e+08 Transitions= 1.57e+11 Memory= 10285.080        t= 2.68e+05 R=   1e+03
+Depth=  117893 States= 2.78e+08 Transitions= 1.57e+11 Memory= 10311.154        t= 2.69e+05 R=   1e+03
+Depth=  117893 States= 2.79e+08 Transitions= 1.58e+11 Memory= 10337.033        t= 2.71e+05 R=   1e+03
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.log b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..f9e72c0
--- /dev/null
@@ -0,0 +1,810 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi-compress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+depth 7: Claim reached state 9 (line 1367)
+depth 50: Claim reached state 9 (line 1366)
+Depth=    7605 States=    1e+06 Transitions= 3.21e+08 Memory=   493.010        t=    500 R=   2e+03
+Depth=    7605 States=    2e+06 Transitions= 6.35e+08 Memory=   518.401        t=    996 R=   2e+03
+Depth=    7605 States=    3e+06 Transitions= 9.46e+08 Memory=   549.455        t= 1.5e+03 R=   2e+03
+pan: resizing hashtable to -w22..  done
+Depth=    7605 States=    4e+06 Transitions= 1.25e+09 Memory=   609.776        t= 1.97e+03 R=   2e+03
+Depth=    9389 States=    5e+06 Transitions= 1.56e+09 Memory=   634.873        t= 2.46e+03 R=   2e+03
+Depth=    9389 States=    6e+06 Transitions= 1.88e+09 Memory=   662.315        t= 2.97e+03 R=   2e+03
+Depth=    9389 States=    7e+06 Transitions= 2.25e+09 Memory=   688.193        t= 3.58e+03 R=   2e+03
+Depth=    9389 States=    8e+06 Transitions= 2.61e+09 Memory=   716.611        t= 4.18e+03 R=   2e+03
+Depth=    9389 States=    9e+06 Transitions= 2.96e+09 Memory=   743.662        t= 4.75e+03 R=   2e+03
+pan: resizing hashtable to -w24..  done
+Depth=    9389 States=    1e+07 Transitions= 3.29e+09 Memory=   894.611        t= 5.26e+03 R=   2e+03
+Depth=    9389 States=  1.1e+07 Transitions= 3.62e+09 Memory=   921.076        t= 5.77e+03 R=   2e+03
+Depth=    9389 States=  1.2e+07 Transitions= 3.93e+09 Memory=   947.053        t= 6.27e+03 R=   2e+03
+Depth=    9389 States=  1.3e+07 Transitions= 4.25e+09 Memory=   974.299        t= 6.76e+03 R=   2e+03
+Depth=    9389 States=  1.4e+07 Transitions= 4.55e+09 Memory=  1004.963        t= 7.23e+03 R=   2e+03
+Depth=    9389 States=  1.5e+07 Transitions= 4.87e+09 Memory=  1029.963        t= 7.73e+03 R=   2e+03
+Depth=    9389 States=  1.6e+07 Transitions= 5.18e+09 Memory=  1058.088        t= 8.23e+03 R=   2e+03
+Depth=    9389 States=  1.7e+07 Transitions= 5.48e+09 Memory=  1087.580        t= 8.71e+03 R=   2e+03
+Depth=    9389 States=  1.8e+07 Transitions= 5.79e+09 Memory=  1113.166        t= 9.19e+03 R=   2e+03
+Depth=    9389 States=  1.9e+07 Transitions=  6.1e+09 Memory=  1139.143        t= 9.68e+03 R=   2e+03
+Depth=    9389 States=    2e+07 Transitions= 6.59e+09 Memory=  1164.436        t= 1.05e+04 R=   2e+03
+Depth=    9389 States=  2.1e+07 Transitions= 7.19e+09 Memory=  1188.557        t= 1.15e+04 R=   2e+03
+Depth=    9389 States=  2.2e+07 Transitions= 8.02e+09 Memory=  1210.236        t= 1.28e+04 R=   2e+03
+Depth=    9389 States=  2.3e+07 Transitions= 8.41e+09 Memory=  1237.385        t= 1.35e+04 R=   2e+03
+Depth=    9389 States=  2.4e+07 Transitions= 8.96e+09 Memory=  1261.604        t= 1.44e+04 R=   2e+03
+Depth=    9389 States=  2.5e+07 Transitions= 9.91e+09 Memory=  1287.287        t= 1.6e+04 R=   2e+03
+Depth=    9389 States=  2.6e+07 Transitions= 1.06e+10 Memory=  1312.971        t= 1.71e+04 R=   2e+03
+Depth=    9600 States=  2.7e+07 Transitions=  1.1e+10 Memory=  1340.510        t= 1.78e+04 R=   2e+03
+Depth=    9600 States=  2.8e+07 Transitions= 1.15e+10 Memory=  1367.756        t= 1.86e+04 R=   2e+03
+Depth=    9600 States=  2.9e+07 Transitions= 1.21e+10 Memory=  1395.100        t= 1.96e+04 R=   1e+03
+Depth=    9600 States=    3e+07 Transitions= 1.27e+10 Memory=  1422.639        t= 2.05e+04 R=   1e+03
+Depth=    9600 States=  3.1e+07 Transitions= 1.32e+10 Memory=  1448.908        t= 2.15e+04 R=   1e+03
+Depth=    9600 States=  3.2e+07 Transitions= 1.39e+10 Memory=  1474.494        t= 2.25e+04 R=   1e+03
+Depth=    9600 States=  3.3e+07 Transitions= 1.46e+10 Memory=  1494.807        t= 2.38e+04 R=   1e+03
+Depth=    9600 States=  3.4e+07 Transitions= 1.52e+10 Memory=  1519.709        t= 2.47e+04 R=   1e+03
+pan: resizing hashtable to -w26..  done
+Depth=    9600 States=  3.5e+07 Transitions= 1.58e+10 Memory=  2042.061        t= 2.57e+04 R=   1e+03
+Depth=    9600 States=  3.6e+07 Transitions= 1.63e+10 Memory=  2068.916        t= 2.66e+04 R=   1e+03
+Depth=    9600 States=  3.7e+07 Transitions=  1.7e+10 Memory=  2094.404        t= 2.75e+04 R=   1e+03
+Depth=    9600 States=  3.8e+07 Transitions= 1.76e+10 Memory=  2119.893        t= 2.86e+04 R=   1e+03
+Depth=    9600 States=  3.9e+07 Transitions= 1.82e+10 Memory=  2144.697        t= 2.96e+04 R=   1e+03
+Depth=    9600 States=    4e+07 Transitions= 1.87e+10 Memory=  2173.018        t= 3.03e+04 R=   1e+03
+Depth=    9600 States=  4.1e+07 Transitions= 1.93e+10 Memory=  2200.068        t= 3.13e+04 R=   1e+03
+Depth=    9600 States=  4.2e+07 Transitions= 1.98e+10 Memory=  2225.459        t= 3.22e+04 R=   1e+03
+Depth=    9600 States=  4.3e+07 Transitions= 2.05e+10 Memory=  2252.217        t= 3.32e+04 R=   1e+03
+Depth=    9600 States=  4.4e+07 Transitions= 2.13e+10 Memory=  2275.557        t= 3.45e+04 R=   1e+03
+Depth=    9600 States=  4.5e+07 Transitions= 2.18e+10 Memory=  2303.096        t= 3.53e+04 R=   1e+03
+Depth=    9600 States=  4.6e+07 Transitions= 2.22e+10 Memory=  2327.608        t= 3.59e+04 R=   1e+03
+Depth=    9600 States=  4.7e+07 Transitions= 2.25e+10 Memory=  2355.342        t= 3.65e+04 R=   1e+03
+Depth=    9600 States=  4.8e+07 Transitions= 2.28e+10 Memory=  2385.029        t= 3.7e+04 R=   1e+03
+Depth=    9600 States=  4.9e+07 Transitions= 2.32e+10 Memory=  2413.643        t= 3.75e+04 R=   1e+03
+Depth=    9600 States=    5e+07 Transitions= 2.35e+10 Memory=  2439.522        t= 3.81e+04 R=   1e+03
+Depth=    9600 States=  5.1e+07 Transitions= 2.42e+10 Memory=  2462.959        t= 3.91e+04 R=   1e+03
+Depth=    9600 States=  5.2e+07 Transitions= 2.46e+10 Memory=  2490.498        t= 3.97e+04 R=   1e+03
+Depth=    9600 States=  5.3e+07 Transitions= 2.49e+10 Memory=  2519.600        t= 4.03e+04 R=   1e+03
+Depth=    9600 States=  5.4e+07 Transitions= 2.53e+10 Memory=  2545.674        t= 4.09e+04 R=   1e+03
+Depth=    9600 States=  5.5e+07 Transitions= 2.56e+10 Memory=  2574.190        t= 4.13e+04 R=   1e+03
+Depth=    9600 States=  5.6e+07 Transitions=  2.6e+10 Memory=  2598.604        t= 4.2e+04 R=   1e+03
+Depth=    9600 States=  5.7e+07 Transitions= 2.66e+10 Memory=  2622.334        t= 4.3e+04 R=   1e+03
+Depth=    9600 States=  5.8e+07 Transitions= 2.73e+10 Memory=  2645.869        t= 4.41e+04 R=   1e+03
+Depth=    9600 States=  5.9e+07 Transitions= 2.81e+10 Memory=  2668.623        t= 4.54e+04 R=   1e+03
+Depth=    9600 States=    6e+07 Transitions= 2.91e+10 Memory=  2701.631        t= 4.72e+04 R=   1e+03
+Depth=    9600 States=  6.1e+07 Transitions= 3.02e+10 Memory=  2729.170        t= 4.9e+04 R=   1e+03
+Depth=    9600 States=  6.2e+07 Transitions= 3.14e+10 Memory=  2754.072        t= 5.09e+04 R=   1e+03
+Depth=    9600 States=  6.3e+07 Transitions= 3.25e+10 Memory=  2778.682        t= 5.29e+04 R=   1e+03
+Depth=    9600 States=  6.4e+07 Transitions= 3.35e+10 Memory=  2799.190        t= 5.46e+04 R=   1e+03
+Depth=    9600 States=  6.5e+07 Transitions= 3.45e+10 Memory=  2820.869        t= 5.61e+04 R=   1e+03
+Depth=    9600 States=  6.6e+07 Transitions= 3.56e+10 Memory=  2840.401        t= 5.81e+04 R=   1e+03
+Depth=    9600 States=  6.7e+07 Transitions= 3.66e+10 Memory=  2859.443        t= 5.98e+04 R=   1e+03
+Depth=    9600 States=  6.8e+07 Transitions= 3.74e+10 Memory=  2881.807        t= 6.1e+04 R=   1e+03
+Depth=    9600 States=  6.9e+07 Transitions= 3.78e+10 Memory=  2909.053        t= 6.17e+04 R=   1e+03
+Depth=    9600 States=    7e+07 Transitions= 3.84e+10 Memory=  2934.151        t= 6.27e+04 R=   1e+03
+Depth=    9600 States=  7.1e+07 Transitions= 3.94e+10 Memory=  2959.053        t= 6.43e+04 R=   1e+03
+Depth=    9600 States=  7.2e+07 Transitions= 4.05e+10 Memory=  2980.049        t= 6.62e+04 R=   1e+03
+Depth=    9600 States=  7.3e+07 Transitions= 4.11e+10 Memory=  3007.197        t= 6.72e+04 R=   1e+03
+Depth=    9600 States=  7.4e+07 Transitions= 4.14e+10 Memory=  3037.276        t= 6.78e+04 R=   1e+03
+Depth=    9600 States=  7.5e+07 Transitions= 4.18e+10 Memory=  3063.252        t= 6.83e+04 R=   1e+03
+Depth=    9600 States=  7.6e+07 Transitions= 4.21e+10 Memory=  3087.276        t= 6.89e+04 R=   1e+03
+Depth=    9600 States=  7.7e+07 Transitions= 4.26e+10 Memory=  3112.178        t= 6.95e+04 R=   1e+03
+Depth=    9600 States=  7.8e+07 Transitions=  4.3e+10 Memory=  3137.764        t= 7.03e+04 R=   1e+03
+Depth=    9600 States=  7.9e+07 Transitions=  4.4e+10 Memory=  3162.178        t= 7.2e+04 R=   1e+03
+Depth=    9600 States=    8e+07 Transitions= 4.48e+10 Memory=  3185.420        t= 7.32e+04 R=   1e+03
+Depth=    9600 States=  8.1e+07 Transitions= 4.53e+10 Memory=  3212.276        t= 7.4e+04 R=   1e+03
+Depth=    9600 States=  8.2e+07 Transitions=  4.6e+10 Memory=  3237.178        t= 7.52e+04 R=   1e+03
+Depth=    9600 States=  8.3e+07 Transitions=  4.7e+10 Memory=  3261.299        t= 7.69e+04 R=   1e+03
+Depth=    9600 States=  8.4e+07 Transitions= 4.75e+10 Memory=  3286.104        t= 7.76e+04 R=   1e+03
+Depth=    9600 States=  8.5e+07 Transitions= 4.84e+10 Memory=  3309.639        t= 7.91e+04 R=   1e+03
+Depth=    9600 States=  8.6e+07 Transitions= 4.91e+10 Memory=  3333.272        t= 8.04e+04 R=   1e+03
+Depth=    9600 States=  8.7e+07 Transitions= 4.98e+10 Memory=  3358.272        t= 8.15e+04 R=   1e+03
+Depth=    9600 States=  8.8e+07 Transitions= 5.04e+10 Memory=  3382.490        t= 8.25e+04 R=   1e+03
+Depth=    9600 States=  8.9e+07 Transitions= 5.09e+10 Memory=  3410.908        t= 8.32e+04 R=   1e+03
+Depth=    9600 States=    9e+07 Transitions= 5.14e+10 Memory=  3437.959        t= 8.41e+04 R=   1e+03
+Depth=    9600 States=  9.1e+07 Transitions= 5.23e+10 Memory=  3460.908        t= 8.56e+04 R=   1e+03
+Depth=    9600 States=  9.2e+07 Transitions= 5.32e+10 Memory=  3484.639        t= 8.71e+04 R=   1e+03
+Depth=    9600 States=  9.3e+07 Transitions= 5.37e+10 Memory=  3509.932        t= 8.79e+04 R=   1e+03
+Depth=    9600 States=  9.4e+07 Transitions= 5.44e+10 Memory=  3534.346        t= 8.91e+04 R=   1e+03
+Depth=    9600 States=  9.5e+07 Transitions= 5.49e+10 Memory=  3561.299        t= 8.98e+04 R=   1e+03
+Depth=    9600 States=  9.6e+07 Transitions= 5.53e+10 Memory=  3589.522        t= 9.06e+04 R=   1e+03
+Depth=    9600 States=  9.7e+07 Transitions= 5.58e+10 Memory=  3619.209        t= 9.14e+04 R=   1e+03
+Depth=    9600 States=  9.8e+07 Transitions= 5.62e+10 Memory=  3645.576        t= 9.2e+04 R=   1e+03
+Depth=    9600 States=  9.9e+07 Transitions= 5.68e+10 Memory=  3668.623        t= 9.31e+04 R=   1e+03
+Depth=    9600 States=    1e+08 Transitions= 5.75e+10 Memory=  3692.061        t= 9.41e+04 R=   1e+03
+Depth=    9600 States= 1.01e+08 Transitions= 5.79e+10 Memory=  3717.940        t= 9.48e+04 R=   1e+03
+Depth=    9600 States= 1.02e+08 Transitions= 5.85e+10 Memory=  3741.963        t= 9.57e+04 R=   1e+03
+Depth=    9600 States= 1.03e+08 Transitions= 5.89e+10 Memory=  3768.428        t= 9.64e+04 R=   1e+03
+Depth=    9600 States= 1.04e+08 Transitions= 5.95e+10 Memory=  3795.186        t= 9.75e+04 R=   1e+03
+Depth=    9600 States= 1.05e+08 Transitions= 6.01e+10 Memory=  3820.283        t= 9.84e+04 R=   1e+03
+Depth=    9600 States= 1.06e+08 Transitions= 6.09e+10 Memory=  3846.358        t= 9.98e+04 R=   1e+03
+Depth=   10157 States= 1.07e+08 Transitions= 6.17e+10 Memory=  3873.408        t= 1.01e+05 R=   1e+03
+Depth=   10157 States= 1.08e+08 Transitions= 6.21e+10 Memory=  3901.924        t= 1.02e+05 R=   1e+03
+Depth=   10157 States= 1.09e+08 Transitions= 6.24e+10 Memory=  3925.850        t= 1.02e+05 R=   1e+03
+Depth=   10157 States=  1.1e+08 Transitions= 6.29e+10 Memory=  3955.244        t= 1.03e+05 R=   1e+03
+Depth=   10193 States= 1.11e+08 Transitions= 6.38e+10 Memory=  3979.854        t= 1.05e+05 R=   1e+03
+Depth=   10193 States= 1.12e+08 Transitions= 6.42e+10 Memory=  4007.686        t= 1.05e+05 R=   1e+03
+Depth=   10193 States= 1.13e+08 Transitions= 6.45e+10 Memory=  4033.076        t= 1.06e+05 R=   1e+03
+Depth=   10193 States= 1.14e+08 Transitions= 6.55e+10 Memory=  4060.127        t= 1.07e+05 R=   1e+03
+Depth=   10193 States= 1.15e+08 Transitions= 6.59e+10 Memory=  4088.057        t= 1.08e+05 R=   1e+03
+Depth=   10193 States= 1.16e+08 Transitions= 6.63e+10 Memory=  4112.276        t= 1.09e+05 R=   1e+03
+Depth=   10193 States= 1.17e+08 Transitions= 6.66e+10 Memory=  4140.986        t= 1.09e+05 R=   1e+03
+Depth=   10193 States= 1.18e+08 Transitions= 6.73e+10 Memory=  4167.158        t= 1.1e+05 R=   1e+03
+Depth=   10193 States= 1.19e+08 Transitions= 6.78e+10 Memory=  4195.088        t= 1.11e+05 R=   1e+03
+Depth=   10193 States=  1.2e+08 Transitions= 6.82e+10 Memory=  4220.674        t= 1.12e+05 R=   1e+03
+Depth=   10193 States= 1.21e+08 Transitions=  6.9e+10 Memory=  4247.041        t= 1.13e+05 R=   1e+03
+Depth=   10193 States= 1.22e+08 Transitions= 6.94e+10 Memory=  4275.166        t= 1.14e+05 R=   1e+03
+Depth=   10193 States= 1.23e+08 Transitions= 6.98e+10 Memory=  4299.287        t= 1.14e+05 R=   1e+03
+Depth=   10193 States= 1.24e+08 Transitions= 7.03e+10 Memory=  4326.143        t= 1.15e+05 R=   1e+03
+Depth=   10193 States= 1.25e+08 Transitions= 7.08e+10 Memory=  4352.901        t= 1.16e+05 R=   1e+03
+Depth=   10193 States= 1.26e+08 Transitions= 7.11e+10 Memory=  4380.440        t= 1.17e+05 R=   1e+03
+Depth=   10193 States= 1.27e+08 Transitions= 7.17e+10 Memory=  4407.490        t= 1.18e+05 R=   1e+03
+Depth=   10193 States= 1.28e+08 Transitions= 7.26e+10 Memory=  4431.026        t= 1.19e+05 R=   1e+03
+Depth=   10193 States= 1.29e+08 Transitions=  7.3e+10 Memory=  4461.787        t= 1.2e+05 R=   1e+03
+Depth=   10193 States=  1.3e+08 Transitions= 7.34e+10 Memory=  4487.373        t= 1.2e+05 R=   1e+03
+Depth=   10193 States= 1.31e+08 Transitions= 7.37e+10 Memory=  4511.006        t= 1.21e+05 R=   1e+03
+Depth=   10193 States= 1.32e+08 Transitions= 7.43e+10 Memory=  4536.494        t= 1.22e+05 R=   1e+03
+Depth=   10193 States= 1.33e+08 Transitions= 7.47e+10 Memory=  4564.815        t= 1.23e+05 R=   1e+03
+Depth=   10193 States= 1.34e+08 Transitions= 7.56e+10 Memory=  4590.498        t= 1.24e+05 R=   1e+03
+Depth=   10193 States= 1.35e+08 Transitions=  7.6e+10 Memory=  4619.209        t= 1.25e+05 R=   1e+03
+pan: resizing hashtable to -w28..  done
+Depth=   10193 States= 1.36e+08 Transitions= 7.63e+10 Memory=  6667.209        t= 1.25e+05 R=   1e+03
+Depth=   10193 States= 1.37e+08 Transitions= 7.68e+10 Memory=  6667.209        t= 1.26e+05 R=   1e+03
+Depth=   10193 States= 1.38e+08 Transitions= 7.73e+10 Memory=  6680.490        t= 1.27e+05 R=   1e+03
+Depth=   10193 States= 1.39e+08 Transitions= 7.76e+10 Memory=  6710.178        t= 1.27e+05 R=   1e+03
+Depth=   10193 States=  1.4e+08 Transitions= 7.86e+10 Memory=  6736.252        t= 1.29e+05 R=   1e+03
+Depth=   10193 States= 1.41e+08 Transitions= 7.91e+10 Memory=  6765.647        t= 1.3e+05 R=   1e+03
+Depth=   10193 States= 1.42e+08 Transitions= 7.95e+10 Memory=  6792.697        t= 1.3e+05 R=   1e+03
+Depth=   10193 States= 1.43e+08 Transitions= 7.98e+10 Memory=  6816.818        t= 1.31e+05 R=   1e+03
+Depth=   10193 States= 1.44e+08 Transitions= 8.02e+10 Memory=  6842.893        t= 1.32e+05 R=   1e+03
+Depth=   10193 States= 1.45e+08 Transitions=  8.1e+10 Memory=  6870.236        t= 1.33e+05 R=   1e+03
+Depth=   10193 States= 1.46e+08 Transitions= 8.15e+10 Memory=  6898.361        t= 1.34e+05 R=   1e+03
+Depth=   10193 States= 1.47e+08 Transitions= 8.18e+10 Memory=  6922.092        t= 1.34e+05 R=   1e+03
+Depth=   10193 States= 1.48e+08 Transitions= 8.23e+10 Memory=  6948.557        t= 1.35e+05 R=   1e+03
+Depth=   10193 States= 1.49e+08 Transitions= 8.28e+10 Memory=  6974.533        t= 1.36e+05 R=   1e+03
+Depth=   10193 States=  1.5e+08 Transitions= 8.31e+10 Memory=  7004.221        t= 1.36e+05 R=   1e+03
+Depth=   10193 States= 1.51e+08 Transitions= 8.36e+10 Memory=  7029.416        t= 1.37e+05 R=   1e+03
+Depth=   10193 States= 1.52e+08 Transitions= 8.41e+10 Memory=  7055.588        t= 1.38e+05 R=   1e+03
+Depth=   10193 States= 1.53e+08 Transitions= 8.45e+10 Memory=  7081.760        t= 1.39e+05 R=   1e+03
+Depth=   10193 States= 1.54e+08 Transitions=  8.5e+10 Memory=  7107.834        t= 1.39e+05 R=   1e+03
+Depth=   10193 States= 1.55e+08 Transitions= 8.54e+10 Memory=  7134.201        t= 1.4e+05 R=   1e+03
+Depth=   10193 States= 1.56e+08 Transitions= 8.59e+10 Memory=  7158.713        t= 1.41e+05 R=   1e+03
+Depth=   10193 States= 1.57e+08 Transitions= 8.66e+10 Memory=  7181.662        t= 1.42e+05 R=   1e+03
+Depth=   10193 States= 1.58e+08 Transitions= 8.74e+10 Memory=  7204.221        t= 1.43e+05 R=   1e+03
+Depth=   10193 States= 1.59e+08 Transitions= 8.78e+10 Memory=  7230.295        t= 1.44e+05 R=   1e+03
+Depth=   10193 States=  1.6e+08 Transitions= 8.83e+10 Memory=  7256.662        t= 1.45e+05 R=   1e+03
+Depth=   10193 States= 1.61e+08 Transitions= 8.87e+10 Memory=  7283.127        t= 1.45e+05 R=   1e+03
+Depth=   10193 States= 1.62e+08 Transitions= 8.94e+10 Memory=  7306.760        t= 1.47e+05 R=   1e+03
+Depth=   10193 States= 1.63e+08 Transitions= 9.03e+10 Memory=  7331.565        t= 1.48e+05 R=   1e+03
+Depth=   10193 States= 1.64e+08 Transitions= 9.12e+10 Memory=  7356.955        t= 1.49e+05 R=   1e+03
+Depth=   10193 States= 1.65e+08 Transitions= 9.15e+10 Memory=  7384.983        t= 1.5e+05 R=   1e+03
+Depth=   10193 States= 1.66e+08 Transitions=  9.2e+10 Memory=  7412.033        t= 1.51e+05 R=   1e+03
+Depth=   10193 States= 1.67e+08 Transitions= 9.26e+10 Memory=  7439.377        t= 1.52e+05 R=   1e+03
+Depth=   10193 States= 1.68e+08 Transitions= 9.32e+10 Memory=  7465.256        t= 1.53e+05 R=   1e+03
+Depth=   10193 States= 1.69e+08 Transitions= 9.37e+10 Memory=  7493.479        t= 1.54e+05 R=   1e+03
+Depth=   10193 States=  1.7e+08 Transitions= 9.44e+10 Memory=  7520.041        t= 1.55e+05 R=   1e+03
+Depth=   10193 States= 1.71e+08 Transitions=  9.5e+10 Memory=  7544.065        t= 1.56e+05 R=   1e+03
+Depth=   10193 States= 1.72e+08 Transitions= 9.59e+10 Memory=  7564.377        t= 1.57e+05 R=   1e+03
+Depth=   10193 States= 1.73e+08 Transitions= 9.66e+10 Memory=  7588.693        t= 1.58e+05 R=   1e+03
+Depth=   10193 States= 1.74e+08 Transitions= 9.73e+10 Memory=  7614.865        t= 1.59e+05 R=   1e+03
+Depth=   10193 States= 1.75e+08 Transitions= 9.81e+10 Memory=  7640.354        t= 1.61e+05 R=   1e+03
+Depth=   10193 States= 1.76e+08 Transitions= 9.87e+10 Memory=  7665.744        t= 1.62e+05 R=   1e+03
+Depth=   10193 States= 1.77e+08 Transitions= 9.93e+10 Memory=  7691.135        t= 1.63e+05 R=   1e+03
+Depth=   10193 States= 1.78e+08 Transitions= 9.99e+10 Memory=  7718.479        t= 1.64e+05 R=   1e+03
+Depth=   10193 States= 1.79e+08 Transitions= 1.01e+11 Memory=  7743.772        t= 1.65e+05 R=   1e+03
+Depth=   10193 States=  1.8e+08 Transitions= 1.01e+11 Memory=  7769.651        t= 1.66e+05 R=   1e+03
+Depth=   10193 States= 1.81e+08 Transitions= 1.02e+11 Memory=  7795.920        t= 1.67e+05 R=   1e+03
+Depth=   10193 States= 1.82e+08 Transitions= 1.03e+11 Memory=  7819.651        t= 1.68e+05 R=   1e+03
+Depth=   10193 States= 1.83e+08 Transitions= 1.03e+11 Memory=  7846.897        t= 1.69e+05 R=   1e+03
+Depth=   10193 States= 1.84e+08 Transitions= 1.04e+11 Memory=  7874.631        t= 1.7e+05 R=   1e+03
+Depth=   10193 States= 1.85e+08 Transitions= 1.04e+11 Memory=  7900.510        t= 1.7e+05 R=   1e+03
+Depth=   10193 States= 1.86e+08 Transitions= 1.05e+11 Memory=  7925.803        t= 1.71e+05 R=   1e+03
+Depth=   10193 States= 1.87e+08 Transitions= 1.05e+11 Memory=  7955.295        t= 1.72e+05 R=   1e+03
+Depth=   10193 States= 1.88e+08 Transitions= 1.05e+11 Memory=  7983.420        t= 1.72e+05 R=   1e+03
+Depth=   10193 States= 1.89e+08 Transitions= 1.06e+11 Memory=  8010.373        t= 1.73e+05 R=   1e+03
+Depth=   10193 States=  1.9e+08 Transitions= 1.06e+11 Memory=  8037.424        t= 1.73e+05 R=   1e+03
+Depth=   10193 States= 1.91e+08 Transitions= 1.07e+11 Memory=  8061.838        t= 1.74e+05 R=   1e+03
+Depth=   10193 States= 1.92e+08 Transitions= 1.07e+11 Memory=  8087.326        t= 1.75e+05 R=   1e+03
+Depth=   10193 States= 1.93e+08 Transitions= 1.08e+11 Memory=  8115.256        t= 1.76e+05 R=   1e+03
+Depth=   10193 States= 1.94e+08 Transitions= 1.08e+11 Memory=  8142.307        t= 1.76e+05 R=   1e+03
+Depth=   10193 States= 1.95e+08 Transitions= 1.08e+11 Memory=  8168.479        t= 1.77e+05 R=   1e+03
+Depth=   10193 States= 1.96e+08 Transitions= 1.09e+11 Memory=  8194.846        t= 1.78e+05 R=   1e+03
+Depth=   10193 States= 1.97e+08 Transitions= 1.09e+11 Memory=  8221.311        t= 1.79e+05 R=   1e+03
+Depth=   10193 States= 1.98e+08 Transitions=  1.1e+11 Memory=  8249.240        t= 1.79e+05 R=   1e+03
+Depth=   10193 States= 1.99e+08 Transitions=  1.1e+11 Memory=  8273.264        t= 1.8e+05 R=   1e+03
+Depth=   10193 States=    2e+08 Transitions= 1.11e+11 Memory=  8295.920        t= 1.81e+05 R=   1e+03
+Depth=   10193 States= 2.01e+08 Transitions= 1.11e+11 Memory=  8320.529        t= 1.82e+05 R=   1e+03
+Depth=   10193 States= 2.02e+08 Transitions= 1.12e+11 Memory=  8345.236        t= 1.84e+05 R=   1e+03
+Depth=   10193 States= 2.03e+08 Transitions= 1.13e+11 Memory=  8379.026        t= 1.86e+05 R=   1e+03
+Depth=   10193 States= 2.04e+08 Transitions= 1.15e+11 Memory=  8403.635        t= 1.87e+05 R=   1e+03
+Depth=   10193 States= 2.05e+08 Transitions= 1.16e+11 Memory=  8427.951        t= 1.89e+05 R=   1e+03
+Depth=   10193 States= 2.06e+08 Transitions= 1.17e+11 Memory=  8452.463        t= 1.91e+05 R=   1e+03
+Depth=   10193 States= 2.07e+08 Transitions= 1.18e+11 Memory=  8472.483        t= 1.93e+05 R=   1e+03
+Depth=   10193 States= 2.08e+08 Transitions= 1.19e+11 Memory=  8493.283        t= 1.94e+05 R=   1e+03
+Depth=   10193 States= 2.09e+08 Transitions=  1.2e+11 Memory=  8513.205        t= 1.96e+05 R=   1e+03
+Depth=   10193 States=  2.1e+08 Transitions= 1.21e+11 Memory=  8532.053        t= 1.98e+05 R=   1e+03
+Depth=   10193 States= 2.11e+08 Transitions= 1.22e+11 Memory=  8555.588        t= 1.99e+05 R=   1e+03
+Depth=   10193 States= 2.12e+08 Transitions= 1.22e+11 Memory=  8582.932        t=  2e+05 R=   1e+03
+Depth=   10193 States= 2.13e+08 Transitions= 1.23e+11 Memory=  8607.834        t= 2.01e+05 R=   1e+03
+Depth=   10193 States= 2.14e+08 Transitions= 1.23e+11 Memory=  8633.029        t= 2.02e+05 R=   1e+03
+Depth=   10193 States= 2.15e+08 Transitions= 1.24e+11 Memory=  8656.858        t= 2.04e+05 R=   1e+03
+Depth=   10193 States= 2.16e+08 Transitions= 1.25e+11 Memory=  8679.904        t= 2.05e+05 R=   1e+03
+Depth=   10193 States= 2.17e+08 Transitions= 1.26e+11 Memory=  8707.932        t= 2.06e+05 R=   1e+03
+Depth=   10193 States= 2.18e+08 Transitions= 1.26e+11 Memory=  8736.740        t= 2.07e+05 R=   1e+03
+Depth=   10193 States= 2.19e+08 Transitions= 1.26e+11 Memory=  8762.229        t= 2.07e+05 R=   1e+03
+Depth=   10193 States=  2.2e+08 Transitions= 1.27e+11 Memory=  8785.764        t= 2.08e+05 R=   1e+03
+Depth=   10193 States= 2.21e+08 Transitions= 1.27e+11 Memory=  8812.619        t= 2.08e+05 R=   1e+03
+Depth=   10193 States= 2.22e+08 Transitions= 1.28e+11 Memory=  8837.326        t= 2.09e+05 R=   1e+03
+Depth=   10193 States= 2.23e+08 Transitions= 1.29e+11 Memory=  8859.006        t= 2.11e+05 R=   1e+03
+Depth=   10193 States= 2.24e+08 Transitions=  1.3e+11 Memory=  8884.104        t= 2.12e+05 R=   1e+03
+Depth=   10193 States= 2.25e+08 Transitions=  1.3e+11 Memory=  8909.006        t= 2.13e+05 R=   1e+03
+Depth=   10193 States= 2.26e+08 Transitions= 1.31e+11 Memory=  8934.787        t= 2.14e+05 R=   1e+03
+Depth=   10193 States= 2.27e+08 Transitions= 1.31e+11 Memory=  8961.057        t= 2.15e+05 R=   1e+03
+Depth=   10193 States= 2.28e+08 Transitions= 1.32e+11 Memory=  8985.666        t= 2.16e+05 R=   1e+03
+Depth=   10193 States= 2.29e+08 Transitions= 1.33e+11 Memory=  9010.080        t= 2.18e+05 R=   1e+03
+Depth=   10193 States=  2.3e+08 Transitions= 1.34e+11 Memory=  9034.885        t= 2.19e+05 R=   1e+03
+Depth=   10193 States= 2.31e+08 Transitions= 1.35e+11 Memory=  9058.127        t= 2.2e+05 R=   1e+03
+Depth=   10193 States= 2.32e+08 Transitions= 1.35e+11 Memory=  9081.760        t= 2.22e+05 R=   1e+03
+Depth=   10193 States= 2.33e+08 Transitions= 1.36e+11 Memory=  9106.467        t= 2.23e+05 R=   1e+03
+Depth=   10193 States= 2.34e+08 Transitions= 1.37e+11 Memory=  9130.295        t= 2.24e+05 R=   1e+03
+Depth=   10193 States= 2.35e+08 Transitions= 1.37e+11 Memory=  9159.690        t= 2.24e+05 R=   1e+03
+Depth=   10193 States= 2.36e+08 Transitions= 1.38e+11 Memory=  9185.471        t= 2.25e+05 R=   1e+03
+Depth=   10193 States= 2.37e+08 Transitions= 1.38e+11 Memory=  9207.639        t= 2.27e+05 R=   1e+03
+Depth=   10193 States= 2.38e+08 Transitions= 1.39e+11 Memory=  9232.443        t= 2.28e+05 R=   1e+03
+Depth=   10193 States= 2.39e+08 Transitions=  1.4e+11 Memory=  9258.811        t= 2.29e+05 R=   1e+03
+Depth=   10193 States=  2.4e+08 Transitions=  1.4e+11 Memory=  9287.619        t= 2.29e+05 R=   1e+03
+Depth=   10193 States= 2.41e+08 Transitions=  1.4e+11 Memory=  9317.209        t= 2.3e+05 R=   1e+03
+Depth=   10193 States= 2.42e+08 Transitions= 1.41e+11 Memory=  9343.088        t= 2.31e+05 R=   1e+03
+Depth=   10193 States= 2.43e+08 Transitions= 1.42e+11 Memory=  9365.354        t= 2.32e+05 R=   1e+03
+Depth=   10193 States= 2.44e+08 Transitions= 1.42e+11 Memory=  9389.182        t= 2.33e+05 R=   1e+03
+Depth=   10193 States= 2.45e+08 Transitions= 1.43e+11 Memory=  9415.256        t= 2.33e+05 R=   1e+03
+Depth=   10193 States= 2.46e+08 Transitions= 1.43e+11 Memory=  9438.596        t= 2.34e+05 R=   1e+03
+Depth=   10193 States= 2.47e+08 Transitions= 1.44e+11 Memory=  9466.623        t= 2.35e+05 R=   1e+03
+Depth=   10193 States= 2.48e+08 Transitions= 1.44e+11 Memory=  9495.041        t= 2.36e+05 R=   1e+03
+Depth=   10193 States= 2.49e+08 Transitions= 1.45e+11 Memory=  9519.651        t= 2.37e+05 R=   1e+03
+Depth=   10193 States=  2.5e+08 Transitions= 1.46e+11 Memory=  9549.436        t= 2.38e+05 R=   1e+03
+Depth=   10193 States= 2.51e+08 Transitions= 1.46e+11 Memory=  9575.022        t= 2.39e+05 R=   1e+03
+Depth=   10193 States= 2.52e+08 Transitions= 1.46e+11 Memory=  9598.752        t= 2.39e+05 R=   1e+03
+Depth=   10193 States= 2.53e+08 Transitions= 1.47e+11 Memory=  9626.584        t= 2.41e+05 R=   1e+03
+Depth=   10193 States= 2.54e+08 Transitions= 1.48e+11 Memory=  9655.588        t= 2.42e+05 R=   1e+03
+Depth=   10193 States= 2.55e+08 Transitions= 1.48e+11 Memory=  9679.904        t= 2.42e+05 R=   1e+03
+Depth=   10193 States= 2.56e+08 Transitions= 1.49e+11 Memory=  9707.834        t= 2.43e+05 R=   1e+03
+Depth=   10193 States= 2.57e+08 Transitions= 1.49e+11 Memory=  9735.568        t= 2.45e+05 R=   1e+03
+Depth=   10193 States= 2.58e+08 Transitions=  1.5e+11 Memory=  9760.861        t= 2.45e+05 R=   1e+03
+Depth=   10193 States= 2.59e+08 Transitions=  1.5e+11 Memory=  9785.959        t= 2.46e+05 R=   1e+03
+Depth=   10193 States=  2.6e+08 Transitions=  1.5e+11 Memory=  9814.182        t= 2.46e+05 R=   1e+03
+Depth=   10193 States= 2.61e+08 Transitions= 1.51e+11 Memory=  9841.428        t= 2.48e+05 R=   1e+03
+Depth=   10193 States= 2.62e+08 Transitions= 1.52e+11 Memory=  9867.014        t= 2.48e+05 R=   1e+03
+Depth=   10193 States= 2.63e+08 Transitions= 1.52e+11 Memory=  9894.846        t= 2.49e+05 R=   1e+03
+Depth=   10193 States= 2.64e+08 Transitions= 1.53e+11 Memory=  9921.604        t= 2.5e+05 R=   1e+03
+Depth=   10193 States= 2.65e+08 Transitions= 1.53e+11 Memory=  9947.287        t= 2.51e+05 R=   1e+03
+Depth=   10193 States= 2.66e+08 Transitions= 1.54e+11 Memory=  9973.361        t= 2.51e+05 R=   1e+03
+Depth=   10193 States= 2.67e+08 Transitions= 1.54e+11 Memory=  9999.240        t= 2.52e+05 R=   1e+03
+Depth=   10193 States= 2.68e+08 Transitions= 1.55e+11 Memory= 10026.584        t= 2.53e+05 R=   1e+03
+Depth=   10193 States= 2.69e+08 Transitions= 1.55e+11 Memory= 10054.416        t= 2.54e+05 R=   1e+03
+Depth=   10193 States=  2.7e+08 Transitions= 1.56e+11 Memory= 10077.658        t= 2.56e+05 R=   1e+03
+Depth=   10193 States= 2.71e+08 Transitions= 1.56e+11 Memory= 10108.615        t= 2.56e+05 R=   1e+03
+Depth=   10193 States= 2.72e+08 Transitions= 1.57e+11 Memory= 10134.494        t= 2.57e+05 R=   1e+03
+Depth=   10193 States= 2.73e+08 Transitions= 1.57e+11 Memory= 10157.639        t= 2.57e+05 R=   1e+03
+Depth=   10193 States= 2.74e+08 Transitions= 1.58e+11 Memory= 10183.127        t= 2.58e+05 R=   1e+03
+Depth=   10193 States= 2.75e+08 Transitions= 1.58e+11 Memory= 10212.522        t= 2.59e+05 R=   1e+03
+Depth=   10193 States= 2.76e+08 Transitions= 1.59e+11 Memory= 10238.596        t= 2.6e+05 R=   1e+03
+Depth=   10193 States= 2.77e+08 Transitions= 1.59e+11 Memory= 10265.647        t= 2.61e+05 R=   1e+03
+Depth=   10193 States= 2.78e+08 Transitions=  1.6e+11 Memory= 10289.963        t= 2.62e+05 R=   1e+03
+Depth=   10193 States= 2.79e+08 Transitions=  1.6e+11 Memory= 10316.233        t= 2.62e+05 R=   1e+03
+Depth=   10193 States=  2.8e+08 Transitions= 1.61e+11 Memory= 10344.651        t= 2.63e+05 R=   1e+03
+Depth=   10193 States= 2.81e+08 Transitions= 1.61e+11 Memory= 10375.315        t= 2.64e+05 R=   1e+03
+Depth=   10193 States= 2.82e+08 Transitions= 1.62e+11 Memory= 10398.654        t= 2.65e+05 R=   1e+03
+Depth=   10193 States= 2.83e+08 Transitions= 1.62e+11 Memory= 10429.611        t= 2.66e+05 R=   1e+03
+Depth=   10193 States= 2.84e+08 Transitions= 1.63e+11 Memory= 10455.588        t= 2.67e+05 R=   1e+03
+Depth=   10193 States= 2.85e+08 Transitions= 1.63e+11 Memory= 10478.830        t= 2.67e+05 R=   1e+03
+Depth=   10193 States= 2.86e+08 Transitions= 1.64e+11 Memory= 10506.760        t= 2.68e+05 R=   1e+03
+Depth=   10193 States= 2.87e+08 Transitions= 1.64e+11 Memory= 10532.443        t= 2.69e+05 R=   1e+03
+Depth=   10193 States= 2.88e+08 Transitions= 1.65e+11 Memory= 10561.252        t= 2.7e+05 R=   1e+03
+Depth=   10193 States= 2.89e+08 Transitions= 1.65e+11 Memory= 10584.983        t= 2.71e+05 R=   1e+03
+Depth=   10193 States=  2.9e+08 Transitions= 1.66e+11 Memory= 10611.252        t= 2.71e+05 R=   1e+03
+Depth=   10193 States= 2.91e+08 Transitions= 1.66e+11 Memory= 10637.033        t= 2.72e+05 R=   1e+03
+Depth=   10193 States= 2.92e+08 Transitions= 1.66e+11 Memory= 10668.674        t= 2.73e+05 R=   1e+03
+Depth=   10193 States= 2.93e+08 Transitions= 1.67e+11 Memory= 10692.111        t= 2.73e+05 R=   1e+03
+Depth=   10193 States= 2.94e+08 Transitions= 1.67e+11 Memory= 10718.283        t= 2.74e+05 R=   1e+03
+Depth=   10193 States= 2.95e+08 Transitions= 1.68e+11 Memory= 10744.748        t= 2.75e+05 R=   1e+03
+Depth=   10193 States= 2.96e+08 Transitions= 1.68e+11 Memory= 10770.920        t= 2.76e+05 R=   1e+03
+Depth=   10193 States= 2.97e+08 Transitions= 1.69e+11 Memory= 10795.139        t= 2.76e+05 R=   1e+03
+Depth=   10193 States= 2.98e+08 Transitions= 1.69e+11 Memory= 10819.358        t= 2.77e+05 R=   1e+03
+Depth=   10193 States= 2.99e+08 Transitions=  1.7e+11 Memory= 10841.721        t= 2.79e+05 R=   1e+03
+Depth=   10193 States=    3e+08 Transitions= 1.71e+11 Memory= 10867.209        t= 2.79e+05 R=   1e+03
+Depth=   10193 States= 3.01e+08 Transitions= 1.71e+11 Memory= 10893.283        t= 2.8e+05 R=   1e+03
+Depth=   10193 States= 3.02e+08 Transitions= 1.72e+11 Memory= 10916.233        t= 2.81e+05 R=   1e+03
+Depth=   10193 States= 3.03e+08 Transitions= 1.73e+11 Memory= 10940.549        t= 2.83e+05 R=   1e+03
+Depth=   10193 States= 3.04e+08 Transitions= 1.73e+11 Memory= 10971.408        t= 2.84e+05 R=   1e+03
+Depth=   10193 States= 3.05e+08 Transitions= 1.74e+11 Memory= 10996.897        t= 2.84e+05 R=   1e+03
+Depth=   10193 States= 3.06e+08 Transitions= 1.74e+11 Memory= 11023.850        t= 2.85e+05 R=   1e+03
+Depth=   10193 States= 3.07e+08 Transitions= 1.75e+11 Memory= 11049.436        t= 2.86e+05 R=   1e+03
+Depth=   10193 States= 3.08e+08 Transitions= 1.75e+11 Memory= 11077.854        t= 2.87e+05 R=   1e+03
+Depth=   10193 States= 3.09e+08 Transitions= 1.76e+11 Memory= 11104.514        t= 2.88e+05 R=   1e+03
+Depth=   10193 States=  3.1e+08 Transitions= 1.77e+11 Memory= 11125.119        t= 2.89e+05 R=   1e+03
+Depth=   10193 States= 3.11e+08 Transitions= 1.77e+11 Memory= 11146.897        t= 2.91e+05 R=   1e+03
+Depth=   10193 States= 3.12e+08 Transitions= 1.78e+11 Memory= 11173.264        t= 2.92e+05 R=   1e+03
+Depth=   10193 States= 3.13e+08 Transitions= 1.79e+11 Memory= 11198.654        t= 2.93e+05 R=   1e+03
+Depth=   10193 States= 3.14e+08 Transitions=  1.8e+11 Memory= 11224.045        t= 2.94e+05 R=   1e+03
+Depth=   10193 States= 3.15e+08 Transitions=  1.8e+11 Memory= 11248.850        t= 2.95e+05 R=   1e+03
+Depth=   10193 States= 3.16e+08 Transitions= 1.81e+11 Memory= 11274.729        t= 2.96e+05 R=   1e+03
+Depth=   10193 States= 3.17e+08 Transitions= 1.82e+11 Memory= 11300.315        t= 2.97e+05 R=   1e+03
+Depth=   10193 States= 3.18e+08 Transitions= 1.82e+11 Memory= 11326.193        t= 2.98e+05 R=   1e+03
+Depth=   10193 States= 3.19e+08 Transitions= 1.83e+11 Memory= 11351.584        t= 2.99e+05 R=   1e+03
+Depth=   10193 States=  3.2e+08 Transitions= 1.83e+11 Memory= 11377.561        t=  3e+05 R=   1e+03
+Depth=   10193 States= 3.21e+08 Transitions= 1.84e+11 Memory= 11400.510        t= 3.01e+05 R=   1e+03
+Depth=   10193 States= 3.22e+08 Transitions= 1.85e+11 Memory= 11425.803        t= 3.02e+05 R=   1e+03
+Depth=   10193 States= 3.23e+08 Transitions= 1.85e+11 Memory= 11453.928        t= 3.03e+05 R=   1e+03
+Depth=   10193 States= 3.24e+08 Transitions= 1.86e+11 Memory= 11478.635        t= 3.04e+05 R=   1e+03
+Depth=   10193 States= 3.25e+08 Transitions= 1.87e+11 Memory= 11503.830        t= 3.05e+05 R=   1e+03
+Depth=   10193 States= 3.26e+08 Transitions= 1.87e+11 Memory= 11528.830        t= 3.06e+05 R=   1e+03
+Depth=   10193 States= 3.27e+08 Transitions= 1.88e+11 Memory= 11554.416        t= 3.07e+05 R=   1e+03
+Depth=   10193 States= 3.28e+08 Transitions= 1.89e+11 Memory= 11580.686        t= 3.08e+05 R=   1e+03
+Depth=   10193 States= 3.29e+08 Transitions= 1.89e+11 Memory= 11606.467        t= 3.09e+05 R=   1e+03
+Depth=   10193 States=  3.3e+08 Transitions=  1.9e+11 Memory= 11632.346        t= 3.1e+05 R=   1e+03
+Depth=   10193 States= 3.31e+08 Transitions=  1.9e+11 Memory= 11658.420        t= 3.11e+05 R=   1e+03
+Depth=   10193 States= 3.32e+08 Transitions= 1.91e+11 Memory= 11681.662        t= 3.13e+05 R=   1e+03
+Depth=   10193 States= 3.33e+08 Transitions= 1.92e+11 Memory= 11708.713        t= 3.13e+05 R=   1e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 10193, errors: 0
+1.8778602e+08 states, stored (3.33973e+08 visited)
+1.9159243e+11 states, matched
+1.919264e+11 transitions (= visited+matched)
+1.0808296e+12 atomic steps
+hash conflicts: 3.7119584e+10 (resolved)
+
+Stats on memory usage (in Megabytes):
+20774.057      equivalent memory usage for states (stored*(State-vector + overhead))
+ 9228.353      actual memory usage for states (compression: 44.42%)
+               state-vector as stored = 16 byte + 36 byte overhead
+ 2048.000      memory used for hash table (-w28)
+  457.764      memory used for DFS stack (-m10000000)
+    1.576      memory lost to fragmentation
+11732.541      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 618104 5194 3828 2 2 ]
+unreached in proctype urcu_reader
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 407, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 223, "(1)"
+       line 434, "pan.___", state 253, "(1)"
+       line 438, "pan.___", state 266, "(1)"
+       line 687, "pan.___", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 326, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 359, "(1)"
+       line 434, "pan.___", state 389, "(1)"
+       line 438, "pan.___", state 402, "(1)"
+       line 407, "pan.___", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 455, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 488, "(1)"
+       line 434, "pan.___", state 518, "(1)"
+       line 438, "pan.___", state 531, "(1)"
+       line 407, "pan.___", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 556, "(1)"
+       line 407, "pan.___", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 557, "else"
+       line 407, "pan.___", state 560, "(1)"
+       line 411, "pan.___", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 570, "(1)"
+       line 411, "pan.___", state 571, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 571, "else"
+       line 411, "pan.___", state 574, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 409, "pan.___", state 580, "((i<1))"
+       line 409, "pan.___", state 580, "((i>=1))"
+       line 416, "pan.___", state 586, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 588, "(1)"
+       line 416, "pan.___", state 589, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 589, "else"
+       line 416, "pan.___", state 592, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 420, "pan.___", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 602, "(1)"
+       line 420, "pan.___", state 603, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 603, "else"
+       line 420, "pan.___", state 606, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 418, "pan.___", state 612, "((i<2))"
+       line 418, "pan.___", state 612, "((i>=2))"
+       line 425, "pan.___", state 619, "(1)"
+       line 425, "pan.___", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 620, "else"
+       line 425, "pan.___", state 623, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 429, "pan.___", state 632, "(1)"
+       line 429, "pan.___", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 633, "else"
+       line 429, "pan.___", state 636, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 427, "pan.___", state 642, "((i<1))"
+       line 427, "pan.___", state 642, "((i>=1))"
+       line 434, "pan.___", state 649, "(1)"
+       line 434, "pan.___", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 650, "else"
+       line 434, "pan.___", state 653, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 438, "pan.___", state 662, "(1)"
+       line 438, "pan.___", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 663, "else"
+       line 438, "pan.___", state 666, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 436, "pan.___", state 672, "((i<2))"
+       line 436, "pan.___", state 672, "((i>=2))"
+       line 446, "pan.___", state 676, "(1)"
+       line 446, "pan.___", state 676, "(1)"
+       line 687, "pan.___", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 687, "pan.___", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 687, "pan.___", state 681, "(1)"
+       line 407, "pan.___", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 720, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 753, "(1)"
+       line 434, "pan.___", state 783, "(1)"
+       line 438, "pan.___", state 796, "(1)"
+       line 407, "pan.___", state 824, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 856, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 870, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 889, "(1)"
+       line 434, "pan.___", state 919, "(1)"
+       line 438, "pan.___", state 932, "(1)"
+       line 407, "pan.___", state 953, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 955, "(1)"
+       line 407, "pan.___", state 956, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 956, "else"
+       line 407, "pan.___", state 959, "(1)"
+       line 411, "pan.___", state 967, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 969, "(1)"
+       line 411, "pan.___", state 970, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 970, "else"
+       line 411, "pan.___", state 973, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 409, "pan.___", state 979, "((i<1))"
+       line 409, "pan.___", state 979, "((i>=1))"
+       line 416, "pan.___", state 985, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 987, "(1)"
+       line 416, "pan.___", state 988, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 988, "else"
+       line 416, "pan.___", state 991, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 420, "pan.___", state 999, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 1001, "(1)"
+       line 420, "pan.___", state 1002, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 1002, "else"
+       line 420, "pan.___", state 1005, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 418, "pan.___", state 1011, "((i<2))"
+       line 418, "pan.___", state 1011, "((i>=2))"
+       line 425, "pan.___", state 1018, "(1)"
+       line 425, "pan.___", state 1019, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 1019, "else"
+       line 425, "pan.___", state 1022, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 429, "pan.___", state 1031, "(1)"
+       line 429, "pan.___", state 1032, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 1032, "else"
+       line 429, "pan.___", state 1035, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 427, "pan.___", state 1041, "((i<1))"
+       line 427, "pan.___", state 1041, "((i>=1))"
+       line 434, "pan.___", state 1048, "(1)"
+       line 434, "pan.___", state 1049, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 1049, "else"
+       line 434, "pan.___", state 1052, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 438, "pan.___", state 1061, "(1)"
+       line 438, "pan.___", state 1062, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 1062, "else"
+       line 438, "pan.___", state 1065, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 436, "pan.___", state 1071, "((i<2))"
+       line 436, "pan.___", state 1071, "((i>=2))"
+       line 446, "pan.___", state 1075, "(1)"
+       line 446, "pan.___", state 1075, "(1)"
+       line 695, "pan.___", state 1079, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 1084, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1116, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1130, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1149, "(1)"
+       line 434, "pan.___", state 1179, "(1)"
+       line 438, "pan.___", state 1192, "(1)"
+       line 407, "pan.___", state 1216, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1248, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1262, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1281, "(1)"
+       line 434, "pan.___", state 1311, "(1)"
+       line 438, "pan.___", state 1324, "(1)"
+       line 407, "pan.___", state 1349, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1381, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1395, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1414, "(1)"
+       line 434, "pan.___", state 1444, "(1)"
+       line 438, "pan.___", state 1457, "(1)"
+       line 407, "pan.___", state 1478, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1510, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1524, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1543, "(1)"
+       line 434, "pan.___", state 1573, "(1)"
+       line 438, "pan.___", state 1586, "(1)"
+       line 407, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1644, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1658, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1677, "(1)"
+       line 434, "pan.___", state 1707, "(1)"
+       line 438, "pan.___", state 1720, "(1)"
+       line 407, "pan.___", state 1741, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1773, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1787, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1806, "(1)"
+       line 434, "pan.___", state 1836, "(1)"
+       line 438, "pan.___", state 1849, "(1)"
+       line 407, "pan.___", state 1873, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1905, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1919, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1938, "(1)"
+       line 434, "pan.___", state 1968, "(1)"
+       line 438, "pan.___", state 1981, "(1)"
+       line 734, "pan.___", state 2002, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 2009, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2041, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2055, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2074, "(1)"
+       line 434, "pan.___", state 2104, "(1)"
+       line 438, "pan.___", state 2117, "(1)"
+       line 407, "pan.___", state 2138, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2170, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2184, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2203, "(1)"
+       line 434, "pan.___", state 2233, "(1)"
+       line 438, "pan.___", state 2246, "(1)"
+       line 407, "pan.___", state 2269, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 2271, "(1)"
+       line 407, "pan.___", state 2272, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 2272, "else"
+       line 407, "pan.___", state 2275, "(1)"
+       line 411, "pan.___", state 2283, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2286, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 2286, "else"
+       line 411, "pan.___", state 2289, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 409, "pan.___", state 2295, "((i<1))"
+       line 409, "pan.___", state 2295, "((i>=1))"
+       line 416, "pan.___", state 2301, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2303, "(1)"
+       line 416, "pan.___", state 2304, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2304, "else"
+       line 416, "pan.___", state 2307, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 420, "pan.___", state 2315, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2317, "(1)"
+       line 420, "pan.___", state 2318, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2318, "else"
+       line 420, "pan.___", state 2321, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 418, "pan.___", state 2327, "((i<2))"
+       line 418, "pan.___", state 2327, "((i>=2))"
+       line 425, "pan.___", state 2334, "(1)"
+       line 425, "pan.___", state 2335, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 2335, "else"
+       line 425, "pan.___", state 2338, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 429, "pan.___", state 2347, "(1)"
+       line 429, "pan.___", state 2348, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 2348, "else"
+       line 429, "pan.___", state 2351, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 427, "pan.___", state 2357, "((i<1))"
+       line 427, "pan.___", state 2357, "((i>=1))"
+       line 434, "pan.___", state 2364, "(1)"
+       line 434, "pan.___", state 2365, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 2365, "else"
+       line 434, "pan.___", state 2368, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 438, "pan.___", state 2377, "(1)"
+       line 438, "pan.___", state 2378, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 2378, "else"
+       line 438, "pan.___", state 2381, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 436, "pan.___", state 2387, "((i<2))"
+       line 436, "pan.___", state 2387, "((i>=2))"
+       line 446, "pan.___", state 2391, "(1)"
+       line 446, "pan.___", state 2391, "(1)"
+       line 734, "pan.___", state 2394, "cached_urcu_active_readers = (tmp+1)"
+       line 734, "pan.___", state 2395, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 734, "pan.___", state 2396, "(1)"
+       line 407, "pan.___", state 2403, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2435, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2449, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2468, "(1)"
+       line 434, "pan.___", state 2498, "(1)"
+       line 438, "pan.___", state 2511, "(1)"
+       line 407, "pan.___", state 2538, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2570, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2584, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2603, "(1)"
+       line 434, "pan.___", state 2633, "(1)"
+       line 438, "pan.___", state 2646, "(1)"
+       line 407, "pan.___", state 2667, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2699, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2713, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2732, "(1)"
+       line 434, "pan.___", state 2762, "(1)"
+       line 438, "pan.___", state 2775, "(1)"
+       line 245, "pan.___", state 2808, "(1)"
+       line 253, "pan.___", state 2828, "(1)"
+       line 257, "pan.___", state 2836, "(1)"
+       line 245, "pan.___", state 2851, "(1)"
+       line 253, "pan.___", state 2871, "(1)"
+       line 257, "pan.___", state 2879, "(1)"
+       line 929, "pan.___", state 2896, "-end-"
+       (245 of 2896 states)
+unreached in proctype urcu_writer
+       line 407, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 425, "pan.___", state 110, "(1)"
+       line 429, "pan.___", state 123, "(1)"
+       line 434, "pan.___", state 140, "(1)"
+       line 268, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 407, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 303, "(1)"
+       line 429, "pan.___", state 316, "(1)"
+       line 434, "pan.___", state 333, "(1)"
+       line 438, "pan.___", state 346, "(1)"
+       line 411, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 447, "(1)"
+       line 434, "pan.___", state 464, "(1)"
+       line 438, "pan.___", state 477, "(1)"
+       line 411, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 586, "(1)"
+       line 434, "pan.___", state 603, "(1)"
+       line 438, "pan.___", state 616, "(1)"
+       line 411, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 715, "(1)"
+       line 434, "pan.___", state 732, "(1)"
+       line 438, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 846, "(1)"
+       line 434, "pan.___", state 863, "(1)"
+       line 438, "pan.___", state 876, "(1)"
+       line 268, "pan.___", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 955, "(1)"
+       line 280, "pan.___", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 978, "(1)"
+       line 249, "pan.___", state 986, "(1)"
+       line 253, "pan.___", state 998, "(1)"
+       line 257, "pan.___", state 1006, "(1)"
+       line 268, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1084, "(1)"
+       line 249, "pan.___", state 1092, "(1)"
+       line 253, "pan.___", state 1104, "(1)"
+       line 257, "pan.___", state 1112, "(1)"
+       line 272, "pan.___", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1176, "(1)"
+       line 249, "pan.___", state 1184, "(1)"
+       line 253, "pan.___", state 1196, "(1)"
+       line 257, "pan.___", state 1204, "(1)"
+       line 268, "pan.___", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1282, "(1)"
+       line 249, "pan.___", state 1290, "(1)"
+       line 253, "pan.___", state 1302, "(1)"
+       line 257, "pan.___", state 1310, "(1)"
+       line 272, "pan.___", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1374, "(1)"
+       line 249, "pan.___", state 1382, "(1)"
+       line 253, "pan.___", state 1394, "(1)"
+       line 257, "pan.___", state 1402, "(1)"
+       line 268, "pan.___", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1480, "(1)"
+       line 249, "pan.___", state 1488, "(1)"
+       line 253, "pan.___", state 1500, "(1)"
+       line 257, "pan.___", state 1508, "(1)"
+       line 272, "pan.___", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1572, "(1)"
+       line 249, "pan.___", state 1580, "(1)"
+       line 253, "pan.___", state 1592, "(1)"
+       line 257, "pan.___", state 1600, "(1)"
+       line 268, "pan.___", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1678, "(1)"
+       line 249, "pan.___", state 1686, "(1)"
+       line 253, "pan.___", state 1698, "(1)"
+       line 257, "pan.___", state 1706, "(1)"
+       line 1304, "pan.___", state 1722, "-end-"
+       (103 of 1722 states)
+unreached in proctype :init:
+       (0 of 28 states)
+unreached in proctype :never:
+       line 1369, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 3.14e+05 seconds
+pan: rate 1063.7031 states/second
+pan: avg transition delay 1.6359e-06 usec
+cp .input.spin urcu_progress_writer.spin.input
+cp .input.spin.trail urcu_progress_writer.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi-compress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer.spin.input
new file mode 100644 (file)
index 0000000..e7f1d2d
--- /dev/null
@@ -0,0 +1,1340 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits are used as triggers to execute the instructions
+ * having those variables as input; leaving bits active inhibits instruction
+ * execution. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
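+
+/*
+ * Illustrative sketch (kept in this comment, not part of the verified model):
+ * a hypothetical two-instruction sequence gated by the token macros above,
+ * where instruction B has a RAW dependency on instruction A. TOK_A, TOK_B and
+ * "state" are made-up names for this example only; the real guards appear in
+ * the reader's atomic if/fi further below.
+ *
+ *	#define TOK_A	(1 << 0)
+ *	#define TOK_B	(1 << 1)
+ *
+ *	if
+ *	:: CONSUME_TOKENS(state, 0, TOK_A) ->
+ *		(execute instruction A: no input tokens required)
+ *		PRODUCE_TOKENS(state, TOK_A);
+ *	:: CONSUME_TOKENS(state, TOK_A, TOK_B) ->
+ *		(execute instruction B: enabled only once A produced TOK_A)
+ *		PRODUCE_TOKENS(state, TOK_B);
+ *	fi;
+ */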
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it can still be required when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
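+
+/*
+ * Illustrative example (comment only, hypothetical variables a and b): the
+ * data dependency kinds described above, on a single shared variable a:
+ *
+ *	a = 1;		S1
+ *	b = a;		S2: RAW on a (S2 reads the value S1 wrote)
+ *	a = 2;		S3: WAR on a with respect to S2, WAW on a with respect to S1
+ *
+ * A control dependency would arise if, for instance, S3 only executed inside
+ * a branch guarded by the value read in S2.
+ */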
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core-synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May write a dirty cache line back to memory (making the update visible to
+ * other processes), or may do nothing: the choice is nondeterministic.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
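+
+/*
+ * Illustrative sketch (comment only): how a store propagates between
+ * processes under this cache model, for a hypothetical cached variable x
+ * declared with DECLARE_CACHED_VAR()/DECLARE_PROC_CACHED_VAR():
+ *
+ *	WRITE_CACHED_VAR(x, 1);			writer: cached_x = 1, marked dirty
+ *	CACHE_WRITE_TO_MEM(x, get_pid());	writer: dirty line flushed, mem_x = 1
+ *	CACHE_READ_FROM_MEM(x, get_pid());	reader: clean cached_x refreshed from mem_x
+ *	assert(READ_CACHED_VAR(x) == 1);	reader now observes the store
+ *
+ * The RANDOM_* variants above nondeterministically perform or skip the
+ * flush/refresh, which is what lets the model explore weakly-ordered memory
+ * behaviour; the smp_wmb/smp_rmb primitives below force them.
+ */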
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
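+
+/*
+ * Illustrative sketch (comment only): ignoring the ooo_mem() calls and the
+ * token bookkeeping, the guarded steps of PROCEDURE_READ_LOCK above are the
+ * data-flow decomposition of this sequential read-lock logic:
+ *
+ *	tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+ *	if
+ *	:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
+ *		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
+ *		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);
+ *	:: else ->
+ *		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1);
+ *	fi;
+ */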
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
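+
+/*
+ * Worked example of the base shifts (comment only): with READ_LOCK_BASE == 1,
+ * the PROCEDURE_READ_LOCK body tokens map onto the layout above as:
+ *
+ *	READ_PROD_A_READ         << READ_LOCK_BASE  == (1 << 0) << 1 == (1 << 1)
+ *	READ_PROD_B_IF_TRUE      << READ_LOCK_BASE  == (1 << 2)
+ *	READ_PROD_B_IF_FALSE     << READ_LOCK_BASE  == (1 << 3)
+ *	READ_PROD_C_IF_TRUE_READ << READ_LOCK_BASE  == (1 << 4)
+ *	READ_LOCK_OUT                               == (1 << 5)
+ *
+ * i.e. bits 1 to 5, as annotated. The nested and unrolled lock/unlock
+ * variants reuse the same body tokens shifted by their respective bases.
+ */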
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the barrier
+                        * because the performance impact of the branch that would be needed to skip it
+                        * in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. Reader and writer progress have to be tested
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
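+
+/*
+ * Informal summary (descriptive only): the tokens above encode the nominal
+ * write-side order modeled below: write the new data entry, wmb, exchange
+ * rcu_ptr, first mb, first counter flip (read then write urcu_gp_ctr) and wait
+ * for the reader's parity, second flip and wait, second mb, then poison (free)
+ * the old data entry.
+ */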
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is
+                * acceptable not to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-alpha-ipi-compress/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/.input.spin b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/.input.spin
new file mode 100644 (file)
index 0000000..21196d4
--- /dev/null
@@ -0,0 +1,1156 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. A one-hot bit encoding per variable is used
+ * to save state space. The bits act as triggers to execute the instructions
+ * having those variables as input; leaving bits active inhibits instruction
+ * execution. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
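+
+/*
+ * Illustrative usage sketch (TOKEN_A, TOKEN_B and TOKEN_C are hypothetical
+ * names, not tokens of this model; "proc" stands for a per-process token word
+ * such as proc_urcu_reader): an instruction that requires A to have executed,
+ * and that must be inhibited once C has executed, is modeled as a guarded
+ * alternative:
+ *
+ *   :: CONSUME_TOKENS(proc, TOKEN_A, TOKEN_C) ->
+ *           ... instruction body ...
+ *           PRODUCE_TOKENS(proc, TOKEN_B);
+ *
+ * The guard fires only when TOKEN_A is set and TOKEN_C is clear, so
+ * dependencies and inhibition follow directly from the bit encoding.
+ */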
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can,
+ * however, be reordered by CPU instruction scheduling. This therefore cannot
+ * be considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
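+
+/*
+ * Illustration of the data dependency types above (x and y are hypothetical
+ * variables, using the cached-variable macros defined below):
+ *
+ *   tmp = READ_CACHED_VAR(x); WRITE_CACHED_VAR(y, tmp);  -> RAW through tmp
+ *   tmp = READ_CACHED_VAR(x); WRITE_CACHED_VAR(x, 0);    -> WAR on x
+ *   WRITE_CACHED_VAR(x, 0);   WRITE_CACHED_VAR(x, 1);    -> WAW on x
+ */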
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate a dirty cache line to memory (and hence to the
+ * other caches).
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
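+
+/*
+ * Summary of the cache model above (descriptive only): WRITE_CACHED_VAR()
+ * updates only the writing process's cached copy and marks it dirty; the value
+ * reaches memory (mem_##x), and hence other processes, when
+ * CACHE_WRITE_TO_MEM() runs, which RANDOM_CACHE_WRITE_TO_MEM() may or may not
+ * do and which smp_wmb()/smp_mb() below force for every modeled variable.
+ * Reads are served from the local cached copy until CACHE_READ_FROM_MEM()
+ * refreshes it from memory.
+ */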
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * while waiting for the reader and sending barrier requests,
+                * with the reader always servicing them without continuing its
+                * own execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
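+
+/*
+ * Summary of the barrier modeling above (descriptive only): with
+ * REMOTE_BARRIERS, the writer's smp_mb_send() executes a full barrier locally,
+ * sets reader_barrier[i] for each reader, busy-waits until the reader clears
+ * it from smp_mb_recv() (which itself executes smp_mb()), then executes a
+ * final full barrier. Without REMOTE_BARRIERS, smp_mb_send() and
+ * smp_mb_reader() reduce to a plain smp_mb() and smp_mb_recv() is a no-op.
+ */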
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
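+
+/*
+ * Informal summary of PROCEDURE_READ_LOCK above (descriptive only): it models
+ * rcu_read_lock() as a read of the per-reader count, a branch on the nesting
+ * mask, then either the store of a freshly read urcu_gp_ctr (outermost lock)
+ * or the store of count + 1 (nested lock), each step gated by the one-hot
+ * tokens.
+ */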
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
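+
+/*
+ * Informal summary (descriptive only): in this minimal model the reader tokens
+ * encode a single read-side critical section as executed by urcu_one_read()
+ * below: rcu_read_lock (READ_LOCK_OUT), first mb, read of rcu_ptr
+ * (READ_PROC_READ_GEN), dependent read of the data entry
+ * (READ_PROC_ACCESS_GEN), second mb, then rcu_read_unlock (READ_UNLOCK_OUT).
+ */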
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. Reader and writer progress have to be tested
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill over into the next loop iteration. Given that the
+                * validation checks whether the data entry that was read is
+                * poisoned, it is OK if we do not check for "late arriving"
+                * memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops infinitely, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep init after the readers and writers so the pid count stays correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/DEFINES b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/DEFINES
new file mode 100644 (file)
index 0000000..32299c1
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/Makefile b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/Makefile
new file mode 100644 (file)
index 0000000..abf201c
--- /dev/null
@@ -0,0 +1,171 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64 -DCOLLAPSE
+#CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       make urcu_free | tee urcu_free.log
+       make urcu_free_no_mb | tee urcu_free_no_mb.log
+       make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       make asserts | tee asserts.log
+       make summary
+
+# To show a trail after an individual make target:
+#   spin -v -t -N pan.ltl .input.spin
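+#
+# Hypothetical example: if "make urcu_free" reports errors, the model and
+# counter-example trail are also preserved as urcu_free.spin.input and
+# urcu_free.spin.input.trail by the rules below, and can be replayed later
+# with: spin -v -t -N pan.ltl urcu_free.spin.input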
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/asserts.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/asserts.log
new file mode 100644 (file)
index 0000000..f1445e4
--- /dev/null
@@ -0,0 +1,266 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+cat DEFINES > .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w20
+Depth=    3250 States=    1e+06 Transitions= 2.74e+08 Memory=   500.529        t=    357 R=   3e+03
+Depth=    3250 States=    2e+06 Transitions= 5.69e+08 Memory=   537.248        t=    774 R=   3e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             - (none specified)
+       assertion violations    +
+       cycle checks            - (disabled by -DSAFETY)
+       invalid end states      +
+
+State-vector 72 byte, depth reached 3250, errors: 0
+  2668047 states, stored
+7.3166567e+08 states, matched
+7.3433372e+08 transitions (= stored+matched)
+4.2954757e+09 atomic steps
+hash conflicts: 4.8630608e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  254.445      equivalent memory usage for states (stored*(State-vector + overhead))
+   94.790      actual memory usage for states (compression: 37.25%)
+               state-vector as stored = 9 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  560.490      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 25912 2128 1970 2 ]
+unreached in proctype urcu_reader
+       line 267, ".input.spin", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 275, ".input.spin", state 77, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 102, "(1)"
+       line 248, ".input.spin", state 110, "(1)"
+       line 252, ".input.spin", state 122, "(1)"
+       line 256, ".input.spin", state 130, "(1)"
+       line 403, ".input.spin", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, ".input.spin", state 188, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 221, "(1)"
+       line 430, ".input.spin", state 251, "(1)"
+       line 434, ".input.spin", state 264, "(1)"
+       line 613, ".input.spin", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 403, ".input.spin", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, ".input.spin", state 324, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 357, "(1)"
+       line 430, ".input.spin", state 387, "(1)"
+       line 434, ".input.spin", state 400, "(1)"
+       line 403, ".input.spin", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, ".input.spin", state 453, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 486, "(1)"
+       line 430, ".input.spin", state 516, "(1)"
+       line 434, ".input.spin", state 529, "(1)"
+       line 403, ".input.spin", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 403, ".input.spin", state 554, "(1)"
+       line 403, ".input.spin", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 403, ".input.spin", state 555, "else"
+       line 403, ".input.spin", state 558, "(1)"
+       line 407, ".input.spin", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 407, ".input.spin", state 568, "(1)"
+       line 407, ".input.spin", state 569, "(cache_dirty_urcu_active_readers)"
+       line 407, ".input.spin", state 569, "else"
+       line 407, ".input.spin", state 572, "(1)"
+       line 407, ".input.spin", state 573, "(1)"
+       line 407, ".input.spin", state 573, "(1)"
+       line 405, ".input.spin", state 578, "((i<1))"
+       line 405, ".input.spin", state 578, "((i>=1))"
+       line 412, ".input.spin", state 584, "cache_dirty_rcu_ptr = 0"
+       line 412, ".input.spin", state 586, "(1)"
+       line 412, ".input.spin", state 587, "(cache_dirty_rcu_ptr)"
+       line 412, ".input.spin", state 587, "else"
+       line 412, ".input.spin", state 590, "(1)"
+       line 412, ".input.spin", state 591, "(1)"
+       line 412, ".input.spin", state 591, "(1)"
+       line 416, ".input.spin", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 416, ".input.spin", state 600, "(1)"
+       line 416, ".input.spin", state 601, "(cache_dirty_rcu_data[i])"
+       line 416, ".input.spin", state 601, "else"
+       line 416, ".input.spin", state 604, "(1)"
+       line 416, ".input.spin", state 605, "(1)"
+       line 416, ".input.spin", state 605, "(1)"
+       line 414, ".input.spin", state 610, "((i<2))"
+       line 414, ".input.spin", state 610, "((i>=2))"
+       line 421, ".input.spin", state 617, "(1)"
+       line 421, ".input.spin", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 421, ".input.spin", state 618, "else"
+       line 421, ".input.spin", state 621, "(1)"
+       line 421, ".input.spin", state 622, "(1)"
+       line 421, ".input.spin", state 622, "(1)"
+       line 425, ".input.spin", state 630, "(1)"
+       line 425, ".input.spin", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 425, ".input.spin", state 631, "else"
+       line 425, ".input.spin", state 634, "(1)"
+       line 425, ".input.spin", state 635, "(1)"
+       line 425, ".input.spin", state 635, "(1)"
+       line 423, ".input.spin", state 640, "((i<1))"
+       line 423, ".input.spin", state 640, "((i>=1))"
+       line 430, ".input.spin", state 647, "(1)"
+       line 430, ".input.spin", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 430, ".input.spin", state 648, "else"
+       line 430, ".input.spin", state 651, "(1)"
+       line 430, ".input.spin", state 652, "(1)"
+       line 430, ".input.spin", state 652, "(1)"
+       line 434, ".input.spin", state 660, "(1)"
+       line 434, ".input.spin", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 434, ".input.spin", state 661, "else"
+       line 434, ".input.spin", state 664, "(1)"
+       line 434, ".input.spin", state 665, "(1)"
+       line 434, ".input.spin", state 665, "(1)"
+       line 432, ".input.spin", state 670, "((i<2))"
+       line 432, ".input.spin", state 670, "((i>=2))"
+       line 442, ".input.spin", state 674, "(1)"
+       line 442, ".input.spin", state 674, "(1)"
+       line 613, ".input.spin", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 613, ".input.spin", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 613, ".input.spin", state 679, "(1)"
+       line 403, ".input.spin", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, ".input.spin", state 718, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 751, "(1)"
+       line 430, ".input.spin", state 781, "(1)"
+       line 434, ".input.spin", state 794, "(1)"
+       line 403, ".input.spin", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, ".input.spin", state 853, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 886, "(1)"
+       line 430, ".input.spin", state 916, "(1)"
+       line 434, ".input.spin", state 929, "(1)"
+       line 403, ".input.spin", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, ".input.spin", state 982, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 1015, "(1)"
+       line 430, ".input.spin", state 1045, "(1)"
+       line 434, ".input.spin", state 1058, "(1)"
+       line 244, ".input.spin", state 1091, "(1)"
+       line 252, ".input.spin", state 1111, "(1)"
+       line 256, ".input.spin", state 1119, "(1)"
+       line 747, ".input.spin", state 1136, "-end-"
+       (91 of 1136 states)
+unreached in proctype urcu_writer
+       line 403, ".input.spin", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, ".input.spin", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 412, ".input.spin", state 77, "cache_dirty_rcu_ptr = 0"
+       line 421, ".input.spin", state 110, "(1)"
+       line 425, ".input.spin", state 123, "(1)"
+       line 430, ".input.spin", state 140, "(1)"
+       line 267, ".input.spin", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 198, "cache_dirty_rcu_ptr = 0"
+       line 403, ".input.spin", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, ".input.spin", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 412, ".input.spin", state 270, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 421, ".input.spin", state 303, "(1)"
+       line 425, ".input.spin", state 316, "(1)"
+       line 430, ".input.spin", state 333, "(1)"
+       line 434, ".input.spin", state 346, "(1)"
+       line 407, ".input.spin", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 412, ".input.spin", state 401, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 425, ".input.spin", state 447, "(1)"
+       line 430, ".input.spin", state 464, "(1)"
+       line 434, ".input.spin", state 477, "(1)"
+       line 407, ".input.spin", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 412, ".input.spin", state 540, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 425, ".input.spin", state 586, "(1)"
+       line 430, ".input.spin", state 603, "(1)"
+       line 434, ".input.spin", state 616, "(1)"
+       line 407, ".input.spin", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 412, ".input.spin", state 669, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 425, ".input.spin", state 715, "(1)"
+       line 430, ".input.spin", state 732, "(1)"
+       line 434, ".input.spin", state 745, "(1)"
+       line 407, ".input.spin", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 412, ".input.spin", state 800, "cache_dirty_rcu_ptr = 0"
+       line 416, ".input.spin", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 425, ".input.spin", state 846, "(1)"
+       line 430, ".input.spin", state 863, "(1)"
+       line 434, ".input.spin", state 876, "(1)"
+       line 267, ".input.spin", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 955, "(1)"
+       line 279, ".input.spin", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 978, "(1)"
+       line 248, ".input.spin", state 986, "(1)"
+       line 252, ".input.spin", state 998, "(1)"
+       line 256, ".input.spin", state 1006, "(1)"
+       line 267, ".input.spin", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1084, "(1)"
+       line 248, ".input.spin", state 1092, "(1)"
+       line 252, ".input.spin", state 1104, "(1)"
+       line 256, ".input.spin", state 1112, "(1)"
+       line 271, ".input.spin", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1176, "(1)"
+       line 248, ".input.spin", state 1184, "(1)"
+       line 252, ".input.spin", state 1196, "(1)"
+       line 256, ".input.spin", state 1204, "(1)"
+       line 267, ".input.spin", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1282, "(1)"
+       line 248, ".input.spin", state 1290, "(1)"
+       line 252, ".input.spin", state 1302, "(1)"
+       line 256, ".input.spin", state 1310, "(1)"
+       line 271, ".input.spin", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1374, "(1)"
+       line 248, ".input.spin", state 1382, "(1)"
+       line 252, ".input.spin", state 1394, "(1)"
+       line 256, ".input.spin", state 1402, "(1)"
+       line 267, ".input.spin", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1480, "(1)"
+       line 248, ".input.spin", state 1488, "(1)"
+       line 252, ".input.spin", state 1500, "(1)"
+       line 256, ".input.spin", state 1508, "(1)"
+       line 271, ".input.spin", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1572, "(1)"
+       line 248, ".input.spin", state 1580, "(1)"
+       line 252, ".input.spin", state 1592, "(1)"
+       line 256, ".input.spin", state 1600, "(1)"
+       line 267, ".input.spin", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1678, "(1)"
+       line 248, ".input.spin", state 1686, "(1)"
+       line 252, ".input.spin", state 1698, "(1)"
+       line 256, ".input.spin", state 1706, "(1)"
+       line 1122, ".input.spin", state 1722, "-end-"
+       (103 of 1722 states)
+unreached in proctype :init:
+       (0 of 26 states)
+
+pan: elapsed time 1.01e+03 seconds
+pan: rate  2639.775 states/second
+pan: avg transition delay 1.3764e-06 usec
+cp .input.spin asserts.spin.input
+cp .input.spin.trail asserts.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/asserts.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/asserts.spin.input
new file mode 100644 (file)
index 0000000..21196d4
--- /dev/null
@@ -0,0 +1,1156 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
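+
+/*
+ * Minimal usage sketch (illustration only; TOK_A and TOK_B are hypothetical
+ * token bits): a statement that produces TOK_B and depends on a prior
+ * statement having produced TOK_A is written, in the instruction-scheduling
+ * loops of the reader and writer below, as
+ *
+ *   :: CONSUME_TOKENS(proc_state, TOK_A, TOK_B) ->
+ *           ... execute the statement ...
+ *           PRODUCE_TOKENS(proc_state, TOK_B);
+ *
+ * The "notbits" argument (here TOK_B) keeps the statement from executing
+ * twice; CLEAR_TOKENS() resets the one-hot state once a full pass completes.
+ */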
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order
+ * volatile accesses so they appear in the right order on a given CPU. They can
+ * still be reordered by the CPU's instruction scheduling. This therefore cannot
+ * be considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
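+
+/*
+ * Illustrative sketch only (hypothetical cached variables a and b), mapping
+ * the dependency types above onto the token scheme:
+ *
+ *   RAW: tmp = READ_CACHED_VAR(a) after WRITE_CACHED_VAR(a, 1) must consume
+ *        the token produced by the write.
+ *   WAR: WRITE_CACHED_VAR(a, 2) after tmp = READ_CACHED_VAR(a) must consume
+ *        the read's token when renaming the OOO memory variable is impossible.
+ *   WAW: a second WRITE_CACHED_VAR(b, ...) must consume the token of the
+ *        first write to the same OOO memory variable.
+ */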
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between iterations. To see the
+ * effect of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core-synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
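+
+/*
+ * Usage sketch (illustration only; "foo" is a hypothetical variable): a
+ * process declares its private copy with DECLARE_PROC_CACHED_VAR(byte, foo)
+ * and writes it with WRITE_CACHED_VAR(foo, 1), which only dirties the local
+ * cache. The store becomes globally visible (mem_foo) once
+ * CACHE_WRITE_TO_MEM(foo, get_pid()) runs, which the model performs either
+ * randomly through ooo_mem() or forcibly through smp_wmb() for its fixed set
+ * of shared variables.
+ */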
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies on the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops,
+                * waiting for the reader and sending barrier requests, while
+                * the reader keeps servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
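+
+/*
+ * Handshake sketch (illustration only): with REMOTE_BARRIERS, the writer's
+ * smp_mb_send() executes a local smp_mb(), sets reader_barrier[i] = 1 for
+ * each reader, busy-waits until the flag is cleared, then issues a final
+ * smp_mb(). The reader's smp_mb_recv() may either execute smp_mb() and clear
+ * the flag, or ignore the request entirely; the progress_ignoring_mb labels
+ * mark both choices so these cycles are excluded from the progress checks.
+ */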
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note! Currently only one reader. */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
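+
+/*
+ * Worked example of the base shifts above : with READ_LOCK_BASE = 1, the
+ * PROCEDURE_READ_LOCK body tokens READ_PROD_A_READ << 1 through
+ * READ_PROD_C_IF_TRUE_READ << 1 occupy bits 1 to 4, and its producetoken
+ * READ_LOCK_OUT is bit 5. Likewise, with READ_UNLOCK_BASE = 17,
+ * READ_PROC_READ_UNLOCK << 17 is bit 17 and READ_UNLOCK_OUT is bit 18.
+ */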
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier only executes when the
+                * execution order appears to follow program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop executions.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop's execution from spilling into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add nonexistent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, with weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Placed after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/references.txt b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu.sh b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w21
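+
+# Illustrative sketch only (the Makefile holds the actual invocations used for
+# verification) : an LTL property file such as urcu_free.ltl would typically be
+# turned into a never claim and checked for acceptance cycles with weak
+# fairness, along the lines of :
+#
+#   spin -a -N urcu_free.ltl urcu.spin
+#   cc -o pan pan.c
+#   ./pan -a -f -v -m10000000 -w21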
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu.spin b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu.spin
new file mode 100644 (file)
index 0000000..db5ab0e
--- /dev/null
@@ -0,0 +1,1138 @@
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits are used as triggers to execute the instructions
+ * that take those variables as input. Bits can be left active to inhibit
+ * instruction execution. This scheme makes instruction disabling and
+ * dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
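+
+/*
+ * Illustrative sketch of how the token macros chain two instructions
+ * (hypothetical tokens TOK_FIRST and TOK_SECOND, not part of this model) :
+ *
+ *     :: CONSUME_TOKENS(proc_tokens, TOK_FIRST, TOK_SECOND) ->
+ *             // enabled once TOK_FIRST has been produced and while
+ *             // TOK_SECOND has not been produced yet
+ *             ...second instruction body...
+ *             PRODUCE_TOKENS(proc_tokens, TOK_SECOND);
+ *
+ * CLEAR_TOKENS resets the token word so the whole chain can run again on the
+ * next loop execution.
+ */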
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can still be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
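+
+/*
+ * Illustrative sketch of the data dependency kinds above, on hypothetical
+ * variables a, b and c (not part of this model) :
+ *
+ *     a = b;          (1)
+ *     c = a;          (2)  RAW on a : (2) must read the value written by (1)
+ *     a = 42;         (3)  WAR on a : (3) must not overwrite a before (2) reads it
+ *     a = 43;         (4)  WAW on a : (4) must not be reordered before (3)
+ */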
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core-synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. Cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
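+
+/*
+ * Illustrative walk-through of the cache model above, for a hypothetical
+ * cached variable x (not part of this model) : WRITE_CACHED_VAR(x, v) only
+ * updates the writer's local cached_x and marks it dirty; the value reaches
+ * the shared mem_x when that process executes CACHE_WRITE_TO_MEM for x
+ * (forced by smp_wmb/smp_mb, or randomly by ooo_mem), and another process
+ * only observes it once its own CACHE_READ_FROM_MEM refreshes cached_x from
+ * mem_x (forced by smp_rmb/smp_mb).
+ */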
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader after sending barrier requests, while
+                * the reader keeps servicing them without ever continuing its
+                * own execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier only executes when the
+                * execution order appears to follow program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, if the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop's execution from spilling into the next loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
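+
+/*
+ * A minimal sketch of how these one-hot tokens gate the scheduling of the
+ * write-side instructions: each guard below waits for the tokens of the
+ * steps it depends on and is inhibited by its own output token. For
+ * example, the wmb step of the writer reads as
+ *
+ *	:: CONSUME_TOKENS(proc_urcu_writer,
+ *			  WRITE_DATA,		-- must already be produced
+ *			  WRITE_PROC_WMB) ->	-- must not yet be produced
+ *		smp_wmb(i);
+ *		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+ *
+ * so it can only run after the data write, and only once per loop, until
+ * CLEAR_TOKENS resets the flow.
+ */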
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with progress here so that, with weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.log
new file mode 100644 (file)
index 0000000..dd99ae5
--- /dev/null
@@ -0,0 +1,277 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1178)
+Depth=    3880 States=    1e+06 Transitions= 2.74e+08 Memory=   512.932        t=    435 R=   2e+03
+Depth=    3880 States=    2e+06 Transitions= 5.69e+08 Memory=   559.318        t=    929 R=   2e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 3880, errors: 0
+  2668047 states, stored
+7.3167024e+08 states, matched
+7.3433829e+08 transitions (= stored+matched)
+4.2954757e+09 atomic steps
+hash conflicts: 4.8818996e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  295.156      equivalent memory usage for states (stored*(State-vector + overhead))
+  124.292      actual memory usage for states (compression: 42.11%)
+               state-vector as stored = 13 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  589.983      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 25912 2128 1970 2 1 ]
+unreached in proctype urcu_reader
+       line 267, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 275, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 102, "(1)"
+       line 248, "pan.___", state 110, "(1)"
+       line 252, "pan.___", state 122, "(1)"
+       line 256, "pan.___", state 130, "(1)"
+       line 403, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 221, "(1)"
+       line 430, "pan.___", state 251, "(1)"
+       line 434, "pan.___", state 264, "(1)"
+       line 613, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 403, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 357, "(1)"
+       line 430, "pan.___", state 387, "(1)"
+       line 434, "pan.___", state 400, "(1)"
+       line 403, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 486, "(1)"
+       line 430, "pan.___", state 516, "(1)"
+       line 434, "pan.___", state 529, "(1)"
+       line 403, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 403, "pan.___", state 554, "(1)"
+       line 403, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 403, "pan.___", state 555, "else"
+       line 403, "pan.___", state 558, "(1)"
+       line 407, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 407, "pan.___", state 568, "(1)"
+       line 407, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 407, "pan.___", state 569, "else"
+       line 407, "pan.___", state 572, "(1)"
+       line 407, "pan.___", state 573, "(1)"
+       line 407, "pan.___", state 573, "(1)"
+       line 405, "pan.___", state 578, "((i<1))"
+       line 405, "pan.___", state 578, "((i>=1))"
+       line 412, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 412, "pan.___", state 586, "(1)"
+       line 412, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 412, "pan.___", state 587, "else"
+       line 412, "pan.___", state 590, "(1)"
+       line 412, "pan.___", state 591, "(1)"
+       line 412, "pan.___", state 591, "(1)"
+       line 416, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 416, "pan.___", state 600, "(1)"
+       line 416, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 416, "pan.___", state 601, "else"
+       line 416, "pan.___", state 604, "(1)"
+       line 416, "pan.___", state 605, "(1)"
+       line 416, "pan.___", state 605, "(1)"
+       line 414, "pan.___", state 610, "((i<2))"
+       line 414, "pan.___", state 610, "((i>=2))"
+       line 421, "pan.___", state 617, "(1)"
+       line 421, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 421, "pan.___", state 618, "else"
+       line 421, "pan.___", state 621, "(1)"
+       line 421, "pan.___", state 622, "(1)"
+       line 421, "pan.___", state 622, "(1)"
+       line 425, "pan.___", state 630, "(1)"
+       line 425, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 425, "pan.___", state 631, "else"
+       line 425, "pan.___", state 634, "(1)"
+       line 425, "pan.___", state 635, "(1)"
+       line 425, "pan.___", state 635, "(1)"
+       line 423, "pan.___", state 640, "((i<1))"
+       line 423, "pan.___", state 640, "((i>=1))"
+       line 430, "pan.___", state 647, "(1)"
+       line 430, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 430, "pan.___", state 648, "else"
+       line 430, "pan.___", state 651, "(1)"
+       line 430, "pan.___", state 652, "(1)"
+       line 430, "pan.___", state 652, "(1)"
+       line 434, "pan.___", state 660, "(1)"
+       line 434, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 434, "pan.___", state 661, "else"
+       line 434, "pan.___", state 664, "(1)"
+       line 434, "pan.___", state 665, "(1)"
+       line 434, "pan.___", state 665, "(1)"
+       line 432, "pan.___", state 670, "((i<2))"
+       line 432, "pan.___", state 670, "((i>=2))"
+       line 442, "pan.___", state 674, "(1)"
+       line 442, "pan.___", state 674, "(1)"
+       line 613, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 613, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 613, "pan.___", state 679, "(1)"
+       line 403, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 751, "(1)"
+       line 430, "pan.___", state 781, "(1)"
+       line 434, "pan.___", state 794, "(1)"
+       line 403, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 886, "(1)"
+       line 430, "pan.___", state 916, "(1)"
+       line 434, "pan.___", state 929, "(1)"
+       line 403, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 1015, "(1)"
+       line 430, "pan.___", state 1045, "(1)"
+       line 434, "pan.___", state 1058, "(1)"
+       line 244, "pan.___", state 1091, "(1)"
+       line 252, "pan.___", state 1111, "(1)"
+       line 256, "pan.___", state 1119, "(1)"
+       line 747, "pan.___", state 1136, "-end-"
+       (91 of 1136 states)
+unreached in proctype urcu_writer
+       line 403, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 110, "(1)"
+       line 425, "pan.___", state 123, "(1)"
+       line 430, "pan.___", state 140, "(1)"
+       line 267, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 403, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 303, "(1)"
+       line 425, "pan.___", state 316, "(1)"
+       line 430, "pan.___", state 333, "(1)"
+       line 434, "pan.___", state 346, "(1)"
+       line 407, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 447, "(1)"
+       line 430, "pan.___", state 464, "(1)"
+       line 434, "pan.___", state 477, "(1)"
+       line 407, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 586, "(1)"
+       line 430, "pan.___", state 603, "(1)"
+       line 434, "pan.___", state 616, "(1)"
+       line 407, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 715, "(1)"
+       line 430, "pan.___", state 732, "(1)"
+       line 434, "pan.___", state 745, "(1)"
+       line 407, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 846, "(1)"
+       line 430, "pan.___", state 863, "(1)"
+       line 434, "pan.___", state 876, "(1)"
+       line 267, "pan.___", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 955, "(1)"
+       line 279, "pan.___", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 978, "(1)"
+       line 248, "pan.___", state 986, "(1)"
+       line 252, "pan.___", state 998, "(1)"
+       line 256, "pan.___", state 1006, "(1)"
+       line 267, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1084, "(1)"
+       line 248, "pan.___", state 1092, "(1)"
+       line 252, "pan.___", state 1104, "(1)"
+       line 256, "pan.___", state 1112, "(1)"
+       line 271, "pan.___", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1176, "(1)"
+       line 248, "pan.___", state 1184, "(1)"
+       line 252, "pan.___", state 1196, "(1)"
+       line 256, "pan.___", state 1204, "(1)"
+       line 267, "pan.___", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1282, "(1)"
+       line 248, "pan.___", state 1290, "(1)"
+       line 252, "pan.___", state 1302, "(1)"
+       line 256, "pan.___", state 1310, "(1)"
+       line 271, "pan.___", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1374, "(1)"
+       line 248, "pan.___", state 1382, "(1)"
+       line 252, "pan.___", state 1394, "(1)"
+       line 256, "pan.___", state 1402, "(1)"
+       line 267, "pan.___", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1480, "(1)"
+       line 248, "pan.___", state 1488, "(1)"
+       line 252, "pan.___", state 1500, "(1)"
+       line 256, "pan.___", state 1508, "(1)"
+       line 271, "pan.___", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1572, "(1)"
+       line 248, "pan.___", state 1580, "(1)"
+       line 252, "pan.___", state 1592, "(1)"
+       line 256, "pan.___", state 1600, "(1)"
+       line 267, "pan.___", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1678, "(1)"
+       line 248, "pan.___", state 1686, "(1)"
+       line 252, "pan.___", state 1698, "(1)"
+       line 256, "pan.___", state 1706, "(1)"
+       line 1122, "pan.___", state 1722, "-end-"
+       (103 of 1722 states)
+unreached in proctype :init:
+       (0 of 26 states)
+unreached in proctype :never:
+       line 1183, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 1.21e+03 seconds
+pan: rate 2200.5963 states/second
+pan: avg transition delay 1.651e-06 usec
+cp .input.spin urcu_free.spin.input
+cp .input.spin.trail urcu_free.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.ltl b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free.spin.input
new file mode 100644 (file)
index 0000000..21196d4
--- /dev/null
@@ -0,0 +1,1156 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
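+
+/*
+ * A minimal usage sketch of the three macros above, with hypothetical
+ * STEP_A/STEP_B tokens (names invented for this sketch only): STEP_B may
+ * only execute after STEP_A, and neither may execute twice.
+ *
+ *	int flow;
+ *	#define STEP_A	(1 << 0)
+ *	#define STEP_B	(1 << 1)
+ *
+ *	do
+ *	:: CONSUME_TOKENS(flow, 0, STEP_A) ->
+ *		PRODUCE_TOKENS(flow, STEP_A);
+ *	:: CONSUME_TOKENS(flow, STEP_A, STEP_B) ->
+ *		PRODUCE_TOKENS(flow, STEP_B);
+ *	:: CONSUME_TOKENS(flow, STEP_A | STEP_B, 0) ->
+ *		CLEAR_TOKENS(flow, STEP_A | STEP_B);
+ *		break;
+ *	od;
+ *
+ * Guards whose input tokens are all ready may fire in any order, which is
+ * what lets the model explore out-of-order instruction scheduling.
+ */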
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can still be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
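+
+/*
+ * A small example of the three data dependencies above, using throwaway
+ * variables a and b (not taken from the model):
+ *
+ *	a = 1;		-- (1)
+ *	b = a;		-- (2) RAW on (1): reads the value written by (1)
+ *	a = 2;		-- (3) WAR on (2): overwrites a variable (2) still reads
+ *	a = 3;		-- (4) WAW on (3): overwrites the same variable again
+ *
+ * (2) may not be moved before (1), (3) may not be moved before (2), and (4)
+ * may not be moved before (3) without changing the outcome.
+ */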
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May randomly write a dirty cache entry back to memory (or refresh a clean
+ * one from memory), or do nothing.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
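+
+/*
+ * A sketch of the resulting behaviour for a single variable: a write only
+ * touches the writing process' cache and marks it dirty; the value reaches
+ * memory (and thus other processes) only on a write-back, which either
+ * happens at random or is forced by a barrier.
+ *
+ *	WRITE_CACHED_VAR(rcu_ptr, 1);			-- cached_rcu_ptr = 1, dirty
+ *	RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());	-- may or may not update mem_rcu_ptr
+ *	CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());		-- forced write-back
+ *
+ * The smp_wmb/smp_rmb/smp_mb helpers below simply apply the forced
+ * write-back/refresh to every modeled variable.
+ */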
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
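+
+/*
+ * In short, the REMOTE_BARRIERS handshake modeled above is: the writer
+ * raises a per-reader flag and busy-waits for it to be cleared, while the
+ * reader either services the request with a full smp_mb() or ignores it.
+ *
+ *	writer (smp_mb_send):	reader_barrier[i] = 1;
+ *				wait until reader_barrier[i] == 0
+ *	reader (smp_mb_recv):	reader_barrier[get_readerid()] == 1 ->
+ *				smp_mb(i); reader_barrier[get_readerid()] = 0;
+ *
+ * Since the reader is allowed to ignore requests, the resulting cycles are
+ * excluded from non-progress detection by the progress_* labels on both
+ * sides.
+ */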
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
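+/*
+ * Rough sketch of the token scheme driving urcu_one_read() below (see the
+ * CONSUME_TOKENS/PRODUCE_TOKENS macros): each reader instruction is guarded
+ * by CONSUME_TOKENS(proc_urcu_reader, deps, self), which becomes executable
+ * once every token in "deps" has been produced and the "self" token has not
+ * been produced yet, and it ends with PRODUCE_TOKENS(proc_urcu_reader, self)
+ * to mark completion. Instructions whose token sets do not constrain each
+ * other may therefore execute in either order, which is how compiler/CPU
+ * reordering is explored by the model checker.
+ */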
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
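+       /*
+        * With REMOTE_BARRIERS, the reader relies on the signal-based
+        * smp_mb_recv() handling in the read loop below instead of issuing
+        * its own memory barriers, so the barrier tokens are produced
+        * eagerly here.
+        */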
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier (smp_mb_recv) only executes
+                * at points where the execution order appears in program
+                * order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, when the mb()s are removed (execution using the signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution onto another
+        * iteration's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
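+
+/*
+ * As a rough correspondence (simplified sketch; nesting and the
+ * REMOTE_BARRIERS signal handling are omitted), urcu_one_read() above models
+ * a reader-side critical section of the form:
+ *
+ *      rcu_read_lock();                        PROCEDURE_READ_LOCK
+ *      ptr = rcu_dereference(rcu_ptr);         READ_PROC_READ_GEN + rmb1
+ *      data = rcu_data[ptr];                   READ_PROC_ACCESS_GEN
+ *      rcu_read_unlock();                      PROCEDURE_READ_UNLOCK
+ */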
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader
+                * never blocks anyway. We have to test reader and writer
+                * progress separately, otherwise we could think the writer is
+                * making progress when it is in fact blocked by an
+                * always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on
+                                * the global GP update. Needed to test the
+                                * single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second
+                        * read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the
+                * validation checks whether the data entry read is poisoned,
+                * it is ok if we do not check "late arriving" memory
+                * poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with progress here so that, with weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
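+
+/*
+ * As a rough correspondence (simplified sketch), one iteration of the
+ * urcu_writer loop above models an update of the form:
+ *
+ *      rcu_data[new] = WINE;                   WRITE_DATA, WRITE_PROC_WMB
+ *      old = rcu_xchg_pointer(&rcu_ptr, new);  WRITE_XCHG_PTR
+ *      synchronize_rcu();                      GP flips and waits, bracketed
+ *                                              by WRITE_PROC_FIRST_MB and
+ *                                              WRITE_PROC_SECOND_MB
+ *      free(old);                              WRITE_FREE (modeled by
+ *                                              poisoning rcu_data[old_data])
+ */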
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_nested.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.log
new file mode 100644 (file)
index 0000000..7fe9c9a
--- /dev/null
@@ -0,0 +1,628 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_mb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1179)
+pan: claim violated! (at depth 1246)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 3190, errors: 1
+   124498 states, stored
+ 15198533 states, matched
+ 15323031 transitions (= stored+matched)
+ 86866126 atomic steps
+hash conflicts:    473423 (resolved)
+
+Stats on memory usage (in Megabytes):
+   13.773      equivalent memory usage for states (stored*(State-vector + overhead))
+    6.400      actual memory usage for states (compression: 46.47%)
+               state-vector as stored = 18 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  472.111      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 2572 1245 179 2 2 ]
+unreached in proctype urcu_reader
+       line 713, "pan.___", state 12, "((i<1))"
+       line 713, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 404, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 223, "(1)"
+       line 431, "pan.___", state 253, "(1)"
+       line 435, "pan.___", state 266, "(1)"
+       line 614, "pan.___", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 404, "pan.___", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 326, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 359, "(1)"
+       line 431, "pan.___", state 389, "(1)"
+       line 435, "pan.___", state 402, "(1)"
+       line 404, "pan.___", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 455, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 488, "(1)"
+       line 431, "pan.___", state 518, "(1)"
+       line 435, "pan.___", state 531, "(1)"
+       line 404, "pan.___", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 556, "(1)"
+       line 404, "pan.___", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 557, "else"
+       line 404, "pan.___", state 560, "(1)"
+       line 408, "pan.___", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 570, "(1)"
+       line 408, "pan.___", state 571, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 571, "else"
+       line 408, "pan.___", state 574, "(1)"
+       line 408, "pan.___", state 575, "(1)"
+       line 408, "pan.___", state 575, "(1)"
+       line 406, "pan.___", state 580, "((i<1))"
+       line 406, "pan.___", state 580, "((i>=1))"
+       line 413, "pan.___", state 586, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 588, "(1)"
+       line 413, "pan.___", state 589, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 589, "else"
+       line 413, "pan.___", state 592, "(1)"
+       line 413, "pan.___", state 593, "(1)"
+       line 413, "pan.___", state 593, "(1)"
+       line 417, "pan.___", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 602, "(1)"
+       line 417, "pan.___", state 603, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 603, "else"
+       line 417, "pan.___", state 606, "(1)"
+       line 417, "pan.___", state 607, "(1)"
+       line 417, "pan.___", state 607, "(1)"
+       line 415, "pan.___", state 612, "((i<2))"
+       line 415, "pan.___", state 612, "((i>=2))"
+       line 422, "pan.___", state 619, "(1)"
+       line 422, "pan.___", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 620, "else"
+       line 422, "pan.___", state 623, "(1)"
+       line 422, "pan.___", state 624, "(1)"
+       line 422, "pan.___", state 624, "(1)"
+       line 426, "pan.___", state 632, "(1)"
+       line 426, "pan.___", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 633, "else"
+       line 426, "pan.___", state 636, "(1)"
+       line 426, "pan.___", state 637, "(1)"
+       line 426, "pan.___", state 637, "(1)"
+       line 424, "pan.___", state 642, "((i<1))"
+       line 424, "pan.___", state 642, "((i>=1))"
+       line 431, "pan.___", state 649, "(1)"
+       line 431, "pan.___", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 650, "else"
+       line 431, "pan.___", state 653, "(1)"
+       line 431, "pan.___", state 654, "(1)"
+       line 431, "pan.___", state 654, "(1)"
+       line 435, "pan.___", state 662, "(1)"
+       line 435, "pan.___", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 663, "else"
+       line 435, "pan.___", state 666, "(1)"
+       line 435, "pan.___", state 667, "(1)"
+       line 435, "pan.___", state 667, "(1)"
+       line 433, "pan.___", state 672, "((i<2))"
+       line 433, "pan.___", state 672, "((i>=2))"
+       line 443, "pan.___", state 676, "(1)"
+       line 443, "pan.___", state 676, "(1)"
+       line 614, "pan.___", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 614, "pan.___", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 614, "pan.___", state 681, "(1)"
+       line 404, "pan.___", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 720, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 753, "(1)"
+       line 431, "pan.___", state 783, "(1)"
+       line 435, "pan.___", state 796, "(1)"
+       line 404, "pan.___", state 823, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 855, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 869, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 888, "(1)"
+       line 431, "pan.___", state 918, "(1)"
+       line 435, "pan.___", state 931, "(1)"
+       line 404, "pan.___", state 952, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 984, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 998, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1017, "(1)"
+       line 431, "pan.___", state 1047, "(1)"
+       line 435, "pan.___", state 1060, "(1)"
+       line 245, "pan.___", state 1093, "(1)"
+       line 253, "pan.___", state 1113, "(1)"
+       line 257, "pan.___", state 1121, "(1)"
+       line 748, "pan.___", state 1138, "-end-"
+       (92 of 1138 states)
+unreached in proctype urcu_writer
+       line 837, "pan.___", state 12, "((i<1))"
+       line 837, "pan.___", state 12, "((i>=1))"
+       line 404, "pan.___", state 47, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 53, "(1)"
+       line 408, "pan.___", state 61, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 67, "(1)"
+       line 408, "pan.___", state 68, "(1)"
+       line 408, "pan.___", state 68, "(1)"
+       line 406, "pan.___", state 73, "((i<1))"
+       line 406, "pan.___", state 73, "((i>=1))"
+       line 413, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 85, "(1)"
+       line 413, "pan.___", state 86, "(1)"
+       line 413, "pan.___", state 86, "(1)"
+       line 417, "pan.___", state 99, "(1)"
+       line 417, "pan.___", state 100, "(1)"
+       line 417, "pan.___", state 100, "(1)"
+       line 415, "pan.___", state 105, "((i<2))"
+       line 415, "pan.___", state 105, "((i>=2))"
+       line 422, "pan.___", state 112, "(1)"
+       line 422, "pan.___", state 113, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 113, "else"
+       line 422, "pan.___", state 116, "(1)"
+       line 422, "pan.___", state 117, "(1)"
+       line 422, "pan.___", state 117, "(1)"
+       line 426, "pan.___", state 125, "(1)"
+       line 426, "pan.___", state 126, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 126, "else"
+       line 426, "pan.___", state 129, "(1)"
+       line 426, "pan.___", state 130, "(1)"
+       line 426, "pan.___", state 130, "(1)"
+       line 424, "pan.___", state 135, "((i<1))"
+       line 424, "pan.___", state 135, "((i>=1))"
+       line 431, "pan.___", state 142, "(1)"
+       line 431, "pan.___", state 143, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 143, "else"
+       line 431, "pan.___", state 146, "(1)"
+       line 431, "pan.___", state 147, "(1)"
+       line 431, "pan.___", state 147, "(1)"
+       line 435, "pan.___", state 155, "(1)"
+       line 435, "pan.___", state 156, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 156, "else"
+       line 435, "pan.___", state 159, "(1)"
+       line 435, "pan.___", state 160, "(1)"
+       line 435, "pan.___", state 160, "(1)"
+       line 433, "pan.___", state 165, "((i<2))"
+       line 433, "pan.___", state 165, "((i>=2))"
+       line 443, "pan.___", state 169, "(1)"
+       line 443, "pan.___", state 169, "(1)"
+       line 268, "pan.___", state 178, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 187, "cache_dirty_urcu_active_readers = 0"
+       line 270, "pan.___", state 195, "((i<1))"
+       line 270, "pan.___", state 195, "((i>=1))"
+       line 276, "pan.___", state 200, "cache_dirty_rcu_ptr = 0"
+       line 907, "pan.___", state 228, "old_data = cached_rcu_ptr"
+       line 918, "pan.___", state 232, "_proc_urcu_writer = (_proc_urcu_writer|(1<<4))"
+       line 404, "pan.___", state 240, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 246, "(1)"
+       line 408, "pan.___", state 254, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 260, "(1)"
+       line 408, "pan.___", state 261, "(1)"
+       line 408, "pan.___", state 261, "(1)"
+       line 406, "pan.___", state 266, "((i<1))"
+       line 406, "pan.___", state 266, "((i>=1))"
+       line 413, "pan.___", state 274, "(1)"
+       line 413, "pan.___", state 275, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 275, "else"
+       line 413, "pan.___", state 278, "(1)"
+       line 413, "pan.___", state 279, "(1)"
+       line 413, "pan.___", state 279, "(1)"
+       line 417, "pan.___", state 286, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 292, "(1)"
+       line 417, "pan.___", state 293, "(1)"
+       line 417, "pan.___", state 293, "(1)"
+       line 415, "pan.___", state 298, "((i<2))"
+       line 415, "pan.___", state 298, "((i>=2))"
+       line 422, "pan.___", state 305, "(1)"
+       line 422, "pan.___", state 306, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 306, "else"
+       line 422, "pan.___", state 309, "(1)"
+       line 422, "pan.___", state 310, "(1)"
+       line 422, "pan.___", state 310, "(1)"
+       line 426, "pan.___", state 318, "(1)"
+       line 426, "pan.___", state 319, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 319, "else"
+       line 426, "pan.___", state 322, "(1)"
+       line 426, "pan.___", state 323, "(1)"
+       line 426, "pan.___", state 323, "(1)"
+       line 424, "pan.___", state 328, "((i<1))"
+       line 424, "pan.___", state 328, "((i>=1))"
+       line 431, "pan.___", state 335, "(1)"
+       line 431, "pan.___", state 336, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 336, "else"
+       line 431, "pan.___", state 339, "(1)"
+       line 431, "pan.___", state 340, "(1)"
+       line 431, "pan.___", state 340, "(1)"
+       line 435, "pan.___", state 348, "(1)"
+       line 435, "pan.___", state 349, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 349, "else"
+       line 435, "pan.___", state 352, "(1)"
+       line 435, "pan.___", state 353, "(1)"
+       line 435, "pan.___", state 353, "(1)"
+       line 433, "pan.___", state 358, "((i<2))"
+       line 433, "pan.___", state 358, "((i>=2))"
+       line 443, "pan.___", state 362, "(1)"
+       line 443, "pan.___", state 362, "(1)"
+       line 404, "pan.___", state 373, "(1)"
+       line 404, "pan.___", state 374, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 374, "else"
+       line 404, "pan.___", state 377, "(1)"
+       line 408, "pan.___", state 385, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 391, "(1)"
+       line 408, "pan.___", state 392, "(1)"
+       line 408, "pan.___", state 392, "(1)"
+       line 406, "pan.___", state 397, "((i<1))"
+       line 406, "pan.___", state 397, "((i>=1))"
+       line 413, "pan.___", state 403, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 409, "(1)"
+       line 413, "pan.___", state 410, "(1)"
+       line 413, "pan.___", state 410, "(1)"
+       line 417, "pan.___", state 417, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 423, "(1)"
+       line 417, "pan.___", state 424, "(1)"
+       line 417, "pan.___", state 424, "(1)"
+       line 415, "pan.___", state 429, "((i<2))"
+       line 415, "pan.___", state 429, "((i>=2))"
+       line 422, "pan.___", state 436, "(1)"
+       line 422, "pan.___", state 437, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 437, "else"
+       line 422, "pan.___", state 440, "(1)"
+       line 422, "pan.___", state 441, "(1)"
+       line 422, "pan.___", state 441, "(1)"
+       line 426, "pan.___", state 449, "(1)"
+       line 426, "pan.___", state 450, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 450, "else"
+       line 426, "pan.___", state 453, "(1)"
+       line 426, "pan.___", state 454, "(1)"
+       line 426, "pan.___", state 454, "(1)"
+       line 424, "pan.___", state 459, "((i<1))"
+       line 424, "pan.___", state 459, "((i>=1))"
+       line 431, "pan.___", state 466, "(1)"
+       line 431, "pan.___", state 467, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 467, "else"
+       line 431, "pan.___", state 470, "(1)"
+       line 431, "pan.___", state 471, "(1)"
+       line 431, "pan.___", state 471, "(1)"
+       line 435, "pan.___", state 479, "(1)"
+       line 435, "pan.___", state 480, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 480, "else"
+       line 435, "pan.___", state 483, "(1)"
+       line 435, "pan.___", state 484, "(1)"
+       line 435, "pan.___", state 484, "(1)"
+       line 433, "pan.___", state 489, "((i<2))"
+       line 433, "pan.___", state 489, "((i>=2))"
+       line 443, "pan.___", state 493, "(1)"
+       line 443, "pan.___", state 493, "(1)"
+       line 972, "pan.___", state 504, "_proc_urcu_writer = (_proc_urcu_writer&~((1<<9)))"
+       line 977, "pan.___", state 505, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<8)|(1<<7))))"
+       line 404, "pan.___", state 510, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 516, "(1)"
+       line 408, "pan.___", state 524, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 530, "(1)"
+       line 408, "pan.___", state 531, "(1)"
+       line 408, "pan.___", state 531, "(1)"
+       line 406, "pan.___", state 536, "((i<1))"
+       line 406, "pan.___", state 536, "((i>=1))"
+       line 413, "pan.___", state 542, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 548, "(1)"
+       line 413, "pan.___", state 549, "(1)"
+       line 413, "pan.___", state 549, "(1)"
+       line 417, "pan.___", state 556, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 562, "(1)"
+       line 417, "pan.___", state 563, "(1)"
+       line 417, "pan.___", state 563, "(1)"
+       line 415, "pan.___", state 568, "((i<2))"
+       line 415, "pan.___", state 568, "((i>=2))"
+       line 422, "pan.___", state 575, "(1)"
+       line 422, "pan.___", state 576, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 576, "else"
+       line 422, "pan.___", state 579, "(1)"
+       line 422, "pan.___", state 580, "(1)"
+       line 422, "pan.___", state 580, "(1)"
+       line 426, "pan.___", state 588, "(1)"
+       line 426, "pan.___", state 589, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 589, "else"
+       line 426, "pan.___", state 592, "(1)"
+       line 426, "pan.___", state 593, "(1)"
+       line 426, "pan.___", state 593, "(1)"
+       line 424, "pan.___", state 598, "((i<1))"
+       line 424, "pan.___", state 598, "((i>=1))"
+       line 431, "pan.___", state 605, "(1)"
+       line 431, "pan.___", state 606, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 606, "else"
+       line 431, "pan.___", state 609, "(1)"
+       line 431, "pan.___", state 610, "(1)"
+       line 431, "pan.___", state 610, "(1)"
+       line 435, "pan.___", state 618, "(1)"
+       line 435, "pan.___", state 619, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 619, "else"
+       line 435, "pan.___", state 622, "(1)"
+       line 435, "pan.___", state 623, "(1)"
+       line 435, "pan.___", state 623, "(1)"
+       line 443, "pan.___", state 632, "(1)"
+       line 443, "pan.___", state 632, "(1)"
+       line 404, "pan.___", state 639, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 653, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 671, "cache_dirty_rcu_ptr = 0"
+       line 422, "pan.___", state 704, "(1)"
+       line 426, "pan.___", state 717, "(1)"
+       line 431, "pan.___", state 734, "(1)"
+       line 435, "pan.___", state 747, "(1)"
+       line 408, "pan.___", state 784, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 802, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 816, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 848, "(1)"
+       line 431, "pan.___", state 865, "(1)"
+       line 435, "pan.___", state 878, "(1)"
+       line 1054, "pan.___", state 905, "_proc_urcu_writer = (_proc_urcu_writer|(1<<13))"
+       line 268, "pan.___", state 933, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 935, "(1)"
+       line 272, "pan.___", state 942, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 944, "(1)"
+       line 272, "pan.___", state 945, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 945, "else"
+       line 270, "pan.___", state 950, "((i<1))"
+       line 270, "pan.___", state 950, "((i>=1))"
+       line 276, "pan.___", state 955, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 957, "(1)"
+       line 276, "pan.___", state 958, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 958, "else"
+       line 280, "pan.___", state 964, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 966, "(1)"
+       line 280, "pan.___", state 967, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 967, "else"
+       line 278, "pan.___", state 972, "((i<2))"
+       line 278, "pan.___", state 972, "((i>=2))"
+       line 245, "pan.___", state 980, "(1)"
+       line 249, "pan.___", state 988, "(1)"
+       line 249, "pan.___", state 989, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 989, "else"
+       line 247, "pan.___", state 994, "((i<1))"
+       line 247, "pan.___", state 994, "((i>=1))"
+       line 253, "pan.___", state 1000, "(1)"
+       line 253, "pan.___", state 1001, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1001, "else"
+       line 257, "pan.___", state 1008, "(1)"
+       line 257, "pan.___", state 1009, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1009, "else"
+       line 262, "pan.___", state 1018, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1018, "else"
+       line 1108, "pan.___", state 1034, "((i<1))"
+       line 1108, "pan.___", state 1034, "((i>=1))"
+       line 268, "pan.___", state 1039, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1041, "(1)"
+       line 272, "pan.___", state 1048, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1050, "(1)"
+       line 272, "pan.___", state 1051, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1051, "else"
+       line 270, "pan.___", state 1056, "((i<1))"
+       line 270, "pan.___", state 1056, "((i>=1))"
+       line 276, "pan.___", state 1061, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1063, "(1)"
+       line 276, "pan.___", state 1064, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1064, "else"
+       line 280, "pan.___", state 1070, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1072, "(1)"
+       line 280, "pan.___", state 1073, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1073, "else"
+       line 278, "pan.___", state 1078, "((i<2))"
+       line 278, "pan.___", state 1078, "((i>=2))"
+       line 245, "pan.___", state 1086, "(1)"
+       line 249, "pan.___", state 1094, "(1)"
+       line 249, "pan.___", state 1095, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1095, "else"
+       line 247, "pan.___", state 1100, "((i<1))"
+       line 247, "pan.___", state 1100, "((i>=1))"
+       line 253, "pan.___", state 1106, "(1)"
+       line 253, "pan.___", state 1107, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1107, "else"
+       line 257, "pan.___", state 1114, "(1)"
+       line 257, "pan.___", state 1115, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1115, "else"
+       line 262, "pan.___", state 1124, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1124, "else"
+       line 295, "pan.___", state 1126, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1126, "else"
+       line 1108, "pan.___", state 1127, "(cache_dirty_urcu_gp_ctr)"
+       line 1108, "pan.___", state 1127, "else"
+       line 268, "pan.___", state 1131, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1133, "(1)"
+       line 272, "pan.___", state 1140, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1142, "(1)"
+       line 272, "pan.___", state 1143, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1143, "else"
+       line 270, "pan.___", state 1148, "((i<1))"
+       line 270, "pan.___", state 1148, "((i>=1))"
+       line 276, "pan.___", state 1153, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1155, "(1)"
+       line 276, "pan.___", state 1156, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1156, "else"
+       line 280, "pan.___", state 1162, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1164, "(1)"
+       line 280, "pan.___", state 1165, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1165, "else"
+       line 278, "pan.___", state 1170, "((i<2))"
+       line 278, "pan.___", state 1170, "((i>=2))"
+       line 245, "pan.___", state 1178, "(1)"
+       line 249, "pan.___", state 1186, "(1)"
+       line 249, "pan.___", state 1187, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1187, "else"
+       line 247, "pan.___", state 1192, "((i<1))"
+       line 247, "pan.___", state 1192, "((i>=1))"
+       line 253, "pan.___", state 1198, "(1)"
+       line 253, "pan.___", state 1199, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1199, "else"
+       line 257, "pan.___", state 1206, "(1)"
+       line 257, "pan.___", state 1207, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1207, "else"
+       line 262, "pan.___", state 1216, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1216, "else"
+       line 1112, "pan.___", state 1219, "i = 0"
+       line 1112, "pan.___", state 1221, "reader_barrier = 1"
+       line 1112, "pan.___", state 1232, "((i<1))"
+       line 1112, "pan.___", state 1232, "((i>=1))"
+       line 268, "pan.___", state 1237, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1239, "(1)"
+       line 272, "pan.___", state 1246, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1248, "(1)"
+       line 272, "pan.___", state 1249, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1249, "else"
+       line 270, "pan.___", state 1254, "((i<1))"
+       line 270, "pan.___", state 1254, "((i>=1))"
+       line 276, "pan.___", state 1259, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1261, "(1)"
+       line 276, "pan.___", state 1262, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1262, "else"
+       line 280, "pan.___", state 1268, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1270, "(1)"
+       line 280, "pan.___", state 1271, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1271, "else"
+       line 278, "pan.___", state 1276, "((i<2))"
+       line 278, "pan.___", state 1276, "((i>=2))"
+       line 245, "pan.___", state 1284, "(1)"
+       line 249, "pan.___", state 1292, "(1)"
+       line 249, "pan.___", state 1293, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1293, "else"
+       line 247, "pan.___", state 1298, "((i<1))"
+       line 247, "pan.___", state 1298, "((i>=1))"
+       line 253, "pan.___", state 1304, "(1)"
+       line 253, "pan.___", state 1305, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1305, "else"
+       line 257, "pan.___", state 1312, "(1)"
+       line 257, "pan.___", state 1313, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1313, "else"
+       line 262, "pan.___", state 1322, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1322, "else"
+       line 295, "pan.___", state 1324, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1324, "else"
+       line 1112, "pan.___", state 1325, "(cache_dirty_urcu_gp_ctr)"
+       line 1112, "pan.___", state 1325, "else"
+       line 272, "pan.___", state 1338, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1351, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1360, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1376, "(1)"
+       line 249, "pan.___", state 1384, "(1)"
+       line 253, "pan.___", state 1396, "(1)"
+       line 257, "pan.___", state 1404, "(1)"
+       line 268, "pan.___", state 1435, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1444, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1457, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1466, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1482, "(1)"
+       line 249, "pan.___", state 1490, "(1)"
+       line 253, "pan.___", state 1502, "(1)"
+       line 257, "pan.___", state 1510, "(1)"
+       line 268, "pan.___", state 1527, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1529, "(1)"
+       line 272, "pan.___", state 1536, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1538, "(1)"
+       line 272, "pan.___", state 1539, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1539, "else"
+       line 270, "pan.___", state 1544, "((i<1))"
+       line 270, "pan.___", state 1544, "((i>=1))"
+       line 276, "pan.___", state 1549, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1551, "(1)"
+       line 276, "pan.___", state 1552, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1552, "else"
+       line 280, "pan.___", state 1558, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1560, "(1)"
+       line 280, "pan.___", state 1561, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1561, "else"
+       line 278, "pan.___", state 1566, "((i<2))"
+       line 278, "pan.___", state 1566, "((i>=2))"
+       line 245, "pan.___", state 1574, "(1)"
+       line 249, "pan.___", state 1582, "(1)"
+       line 249, "pan.___", state 1583, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1583, "else"
+       line 247, "pan.___", state 1588, "((i<1))"
+       line 247, "pan.___", state 1588, "((i>=1))"
+       line 253, "pan.___", state 1594, "(1)"
+       line 253, "pan.___", state 1595, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1595, "else"
+       line 257, "pan.___", state 1602, "(1)"
+       line 257, "pan.___", state 1603, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1603, "else"
+       line 262, "pan.___", state 1612, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1612, "else"
+       line 1119, "pan.___", state 1615, "i = 0"
+       line 1119, "pan.___", state 1617, "reader_barrier = 1"
+       line 1119, "pan.___", state 1628, "((i<1))"
+       line 1119, "pan.___", state 1628, "((i>=1))"
+       line 268, "pan.___", state 1633, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1635, "(1)"
+       line 272, "pan.___", state 1642, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1644, "(1)"
+       line 272, "pan.___", state 1645, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1645, "else"
+       line 270, "pan.___", state 1650, "((i<1))"
+       line 270, "pan.___", state 1650, "((i>=1))"
+       line 276, "pan.___", state 1655, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1657, "(1)"
+       line 276, "pan.___", state 1658, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1658, "else"
+       line 280, "pan.___", state 1664, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1666, "(1)"
+       line 280, "pan.___", state 1667, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1667, "else"
+       line 278, "pan.___", state 1672, "((i<2))"
+       line 278, "pan.___", state 1672, "((i>=2))"
+       line 245, "pan.___", state 1680, "(1)"
+       line 249, "pan.___", state 1688, "(1)"
+       line 249, "pan.___", state 1689, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1689, "else"
+       line 247, "pan.___", state 1694, "((i<1))"
+       line 247, "pan.___", state 1694, "((i>=1))"
+       line 253, "pan.___", state 1700, "(1)"
+       line 253, "pan.___", state 1701, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1701, "else"
+       line 257, "pan.___", state 1708, "(1)"
+       line 257, "pan.___", state 1709, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1709, "else"
+       line 262, "pan.___", state 1718, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1718, "else"
+       line 295, "pan.___", state 1720, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1720, "else"
+       line 1119, "pan.___", state 1721, "(cache_dirty_urcu_gp_ctr)"
+       line 1119, "pan.___", state 1721, "else"
+       line 1123, "pan.___", state 1724, "-end-"
+       (312 of 1724 states)
+unreached in proctype :init:
+       line 1138, "pan.___", state 11, "((i<1))"
+       line 1138, "pan.___", state 11, "((i>=1))"
+       (1 of 26 states)
+unreached in proctype :never:
+       line 1184, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 23.7 seconds
+pan: rate 5259.7381 states/second
+pan: avg transition delay 1.5447e-06 usec
+cp .input.spin urcu_free_no_mb.spin.input
+cp .input.spin.trail urcu_free_no_mb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.spin.input
new file mode 100644 (file)
index 0000000..af4dcf5
--- /dev/null
@@ -0,0 +1,1157 @@
+#define NO_MB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active inhibits instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
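+
+/*
+ * Illustrative sketch (hypothetical tokens TOK_A and TOK_B, not used by this
+ * model) of how the macros above encode a dependency of instruction B on
+ * instruction A:
+ *
+ *     if
+ *     :: CONSUME_TOKENS(state, 0, TOK_A) ->      (A has not executed yet)
+ *             (execute A)
+ *             PRODUCE_TOKENS(state, TOK_A);
+ *     :: CONSUME_TOKENS(state, TOK_A, TOK_B) ->  (B requires A's result)
+ *             (execute B)
+ *             PRODUCE_TOKENS(state, TOK_B);
+ *     fi;
+ *
+ * A's own token inhibits its re-execution, and B only becomes executable
+ * once A has produced its token.
+ */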
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
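+
+/*
+ * To make the dependency classification above concrete, consider these
+ * hypothetical statements (not part of this model):
+ *
+ *     S1:  a = x;
+ *     S2:  x = b;
+ *     S3:  x = c;
+ *     S4:  d = x;
+ *
+ * S2 has a WAR dependency on S1 (S2 writes x, which S1 read), S3 has a WAW
+ * dependency on S2 (both write x), and S4 has a RAW dependency on S3 (S4
+ * reads the value written by S3).
+ */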
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own copy of the data in its cache. Caches are
+ * randomly updated. smp_wmb and smp_rmb force cache updates (write and
+ * read); smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May randomly write a dirty cache line back to memory (making the update
+ * visible to other processes), or leave it in the local cache.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
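+
+/*
+ * Usage sketch of the cache model (hypothetical variable "foo", not part of
+ * this model): a write only touches the local cached copy and marks it
+ * dirty; it becomes visible to other processes once it is written back to
+ * mem_foo, which smp_wmb does deterministically and ooo_mem does randomly
+ * for the RCU variables below.
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);          (global memory copy mem_foo)
+ *     DECLARE_PROC_CACHED_VAR(byte, foo);     (per-process cached_foo + dirty bit)
+ *     WRITE_CACHED_VAR(foo, 1);               (cached_foo = 1, marked dirty)
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     (mem_foo = 1, dirty bit cleared)
+ */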
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would
+ * therefore create an incomplete model.
+ * Instead, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
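+
+/*
+ * Sketch of the resulting barrier handshake in REMOTE_BARRIERS mode
+ * (illustration only, for a single reader R):
+ *
+ *     writer (smp_mb_send)                 reader (smp_mb_recv)
+ *     smp_mb(i);
+ *     reader_barrier[R] = 1;
+ *     busy-wait until reader_barrier[R]    smp_mb(i);
+ *     is cleared                           reader_barrier[R] = 0;
+ *     smp_mb(i);
+ *
+ * The reader may instead ignore the request and break out of smp_mb_recv;
+ * the writer's busy-wait carries the progress_writer_progid_* labels so
+ * that spinning there is not reported as a non-progress cycle.
+ */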
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
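+
+/*
+ * Read in sequence, the token-gated statements of PROCEDURE_READ_LOCK above
+ * model the following pseudo-code (illustration only):
+ *
+ *     tmp = urcu_active_readers[get_readerid()];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[get_readerid()] = urcu_gp_ctr;
+ *     else
+ *             urcu_active_readers[get_readerid()] = tmp + 1;
+ */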
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
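+
+/*
+ * PROCEDURE_READ_UNLOCK above models the nesting-count decrement
+ * (illustration only):
+ *
+ *     tmp = urcu_active_readers[get_readerid()];
+ *     urcu_active_readers[get_readerid()] = tmp - 1;
+ */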
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop from spilling its execution onto another loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
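+
+/*
+ * Taken in order, the writer tokens above model the following update-side
+ * pseudo-code (illustration only):
+ *
+ *     rcu_data[new] = WINE;                   (WRITE_DATA)
+ *     smp_wmb();                              (WRITE_PROC_WMB)
+ *     old = xchg(&rcu_ptr, new);              (WRITE_XCHG_PTR)
+ *     smp_mb();                               (WRITE_PROC_FIRST_MB)
+ *     flip GP parity, wait for readers;       (first flip/wait)
+ *     flip GP parity, wait for readers;       (second flip/wait)
+ *     smp_mb();                               (WRITE_PROC_SECOND_MB)
+ *     rcu_data[old] = POISON;                 (WRITE_FREE)
+ */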
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the validation
+                * checks that the data entry read is poisoned, it is OK if we
+                * do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_mb.spin.input.trail
new file mode 100644 (file)
index 0000000..37c6ecd
--- /dev/null
@@ -0,0 +1,1249 @@
+-2:3:-2
+-4:-4:-4
+1:0:2890
+2:2:1138
+3:2:1143
+4:2:1147
+5:2:1155
+6:2:1159
+7:2:1163
+8:0:2890
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:2890
+16:3:2862
+17:3:2865
+18:3:2870
+19:3:2877
+20:3:2880
+21:3:2884
+22:3:2885
+23:0:2890
+24:3:2887
+25:0:2890
+26:2:1167
+27:0:2890
+28:2:1173
+29:0:2890
+30:2:1174
+31:0:2890
+32:2:1176
+33:0:2890
+34:2:1177
+35:0:2890
+36:2:1178
+37:0:2890
+38:2:1179
+39:0:2890
+40:2:1180
+41:2:1181
+42:2:1185
+43:2:1186
+44:2:1194
+45:2:1195
+46:2:1199
+47:2:1200
+48:2:1208
+49:2:1213
+50:2:1217
+51:2:1218
+52:2:1226
+53:2:1227
+54:2:1231
+55:2:1232
+56:2:1226
+57:2:1227
+58:2:1231
+59:2:1232
+60:2:1240
+61:2:1245
+62:2:1246
+63:2:1257
+64:2:1258
+65:2:1259
+66:2:1270
+67:2:1275
+68:2:1276
+69:2:1287
+70:2:1288
+71:2:1289
+72:2:1287
+73:2:1288
+74:2:1289
+75:2:1300
+76:2:1308
+77:0:2890
+78:2:1179
+79:0:2890
+80:2:1312
+81:2:1316
+82:2:1317
+83:2:1321
+84:2:1325
+85:2:1326
+86:2:1330
+87:2:1338
+88:2:1339
+89:2:1343
+90:2:1347
+91:2:1348
+92:2:1343
+93:2:1344
+94:2:1352
+95:0:2890
+96:2:1179
+97:0:2890
+98:2:1360
+99:2:1361
+100:2:1362
+101:0:2890
+102:2:1179
+103:0:2890
+104:2:1370
+105:0:2890
+106:2:1179
+107:0:2890
+108:2:1373
+109:2:1374
+110:2:1378
+111:2:1379
+112:2:1387
+113:2:1388
+114:2:1392
+115:2:1393
+116:2:1401
+117:2:1406
+118:2:1407
+119:2:1419
+120:2:1420
+121:2:1424
+122:2:1425
+123:2:1419
+124:2:1420
+125:2:1424
+126:2:1425
+127:2:1433
+128:2:1438
+129:2:1439
+130:2:1450
+131:2:1451
+132:2:1452
+133:2:1463
+134:2:1468
+135:2:1469
+136:2:1480
+137:2:1481
+138:2:1482
+139:2:1480
+140:2:1481
+141:2:1482
+142:2:1493
+143:2:1500
+144:0:2890
+145:2:1179
+146:0:2890
+147:2:1504
+148:2:1505
+149:2:1506
+150:2:1518
+151:2:1519
+152:2:1523
+153:2:1524
+154:2:1532
+155:2:1537
+156:2:1541
+157:2:1542
+158:2:1550
+159:2:1551
+160:2:1555
+161:2:1556
+162:2:1550
+163:2:1551
+164:2:1555
+165:2:1556
+166:2:1564
+167:2:1569
+168:2:1570
+169:2:1581
+170:2:1582
+171:2:1583
+172:2:1594
+173:2:1599
+174:2:1600
+175:2:1611
+176:2:1612
+177:2:1613
+178:2:1611
+179:2:1612
+180:2:1613
+181:2:1624
+182:2:1635
+183:2:1636
+184:0:2890
+185:2:1179
+186:0:2890
+187:2:1643
+188:2:1644
+189:2:1648
+190:2:1649
+191:2:1657
+192:2:1658
+193:2:1662
+194:2:1663
+195:2:1671
+196:2:1676
+197:2:1680
+198:2:1681
+199:2:1689
+200:2:1690
+201:2:1694
+202:2:1695
+203:2:1689
+204:2:1690
+205:2:1694
+206:2:1695
+207:2:1703
+208:2:1708
+209:2:1709
+210:2:1720
+211:2:1721
+212:2:1722
+213:2:1733
+214:2:1738
+215:2:1739
+216:2:1750
+217:2:1751
+218:2:1752
+219:2:1750
+220:2:1751
+221:2:1752
+222:2:1763
+223:0:2890
+224:2:1179
+225:0:2890
+226:2:1772
+227:2:1773
+228:2:1777
+229:2:1778
+230:2:1786
+231:2:1787
+232:2:1791
+233:2:1792
+234:2:1800
+235:2:1805
+236:2:1809
+237:2:1810
+238:2:1818
+239:2:1819
+240:2:1823
+241:2:1824
+242:2:1818
+243:2:1819
+244:2:1823
+245:2:1824
+246:2:1832
+247:2:1837
+248:2:1838
+249:2:1849
+250:2:1850
+251:2:1851
+252:2:1862
+253:2:1867
+254:2:1868
+255:2:1879
+256:2:1880
+257:2:1881
+258:2:1879
+259:2:1880
+260:2:1881
+261:2:1892
+262:2:1899
+263:0:2890
+264:2:1179
+265:0:2890
+266:2:1903
+267:2:1904
+268:2:1905
+269:2:1917
+270:2:1918
+271:2:1922
+272:2:1923
+273:2:1931
+274:2:1936
+275:2:1940
+276:2:1941
+277:2:1949
+278:2:1950
+279:2:1954
+280:2:1955
+281:2:1949
+282:2:1950
+283:2:1954
+284:2:1955
+285:2:1963
+286:2:1968
+287:2:1969
+288:2:1980
+289:2:1981
+290:2:1982
+291:2:1993
+292:2:1998
+293:2:1999
+294:2:2010
+295:2:2011
+296:2:2012
+297:2:2010
+298:2:2011
+299:2:2012
+300:2:2023
+301:2:2033
+302:2:2034
+303:0:2890
+304:2:1179
+305:0:2890
+306:2:2043
+307:2:2044
+308:0:2890
+309:2:1179
+310:0:2890
+311:2:2048
+312:0:2890
+313:2:2056
+314:0:2890
+315:2:1174
+316:0:2890
+317:2:1176
+318:0:2890
+319:2:1177
+320:0:2890
+321:2:1178
+322:0:2890
+323:2:1179
+324:0:2890
+325:2:1180
+326:2:1181
+327:2:1185
+328:2:1186
+329:2:1194
+330:2:1195
+331:2:1199
+332:2:1200
+333:2:1208
+334:2:1213
+335:2:1217
+336:2:1218
+337:2:1226
+338:2:1227
+339:2:1228
+340:2:1226
+341:2:1227
+342:2:1231
+343:2:1232
+344:2:1240
+345:2:1245
+346:2:1246
+347:2:1257
+348:2:1258
+349:2:1259
+350:2:1270
+351:2:1275
+352:2:1276
+353:2:1287
+354:2:1288
+355:2:1289
+356:2:1287
+357:2:1288
+358:2:1289
+359:2:1300
+360:2:1308
+361:0:2890
+362:2:1179
+363:0:2890
+364:2:1312
+365:2:1316
+366:2:1317
+367:2:1321
+368:2:1325
+369:2:1326
+370:2:1330
+371:2:1338
+372:2:1339
+373:2:1343
+374:2:1344
+375:2:1343
+376:2:1347
+377:2:1348
+378:2:1352
+379:0:2890
+380:2:1179
+381:0:2890
+382:2:1360
+383:2:1361
+384:2:1362
+385:0:2890
+386:2:1179
+387:0:2890
+388:2:1370
+389:0:2890
+390:2:1179
+391:0:2890
+392:2:1373
+393:2:1374
+394:2:1378
+395:2:1379
+396:2:1387
+397:2:1388
+398:2:1392
+399:2:1393
+400:2:1401
+401:2:1406
+402:2:1407
+403:2:1419
+404:2:1420
+405:2:1424
+406:2:1425
+407:2:1419
+408:2:1420
+409:2:1424
+410:2:1425
+411:2:1433
+412:2:1438
+413:2:1439
+414:2:1450
+415:2:1451
+416:2:1452
+417:2:1463
+418:2:1468
+419:2:1469
+420:2:1480
+421:2:1481
+422:2:1482
+423:2:1480
+424:2:1481
+425:2:1482
+426:2:1493
+427:2:1500
+428:0:2890
+429:2:1179
+430:0:2890
+431:2:1504
+432:2:1505
+433:2:1506
+434:2:1518
+435:2:1519
+436:2:1523
+437:2:1524
+438:2:1532
+439:2:1537
+440:2:1541
+441:2:1542
+442:2:1550
+443:2:1551
+444:2:1555
+445:2:1556
+446:2:1550
+447:2:1551
+448:2:1555
+449:2:1556
+450:2:1564
+451:2:1569
+452:2:1570
+453:2:1581
+454:2:1582
+455:2:1583
+456:2:1594
+457:2:1599
+458:2:1600
+459:2:1611
+460:2:1612
+461:2:1613
+462:2:1611
+463:2:1612
+464:2:1613
+465:2:1624
+466:2:1635
+467:2:1636
+468:0:2890
+469:2:1179
+470:0:2890
+471:2:1643
+472:2:1644
+473:2:1648
+474:2:1649
+475:2:1657
+476:2:1658
+477:2:1662
+478:2:1663
+479:2:1671
+480:2:1676
+481:2:1680
+482:2:1681
+483:2:1689
+484:2:1690
+485:2:1694
+486:2:1695
+487:2:1689
+488:2:1690
+489:2:1694
+490:2:1695
+491:2:1703
+492:2:1708
+493:2:1709
+494:2:1720
+495:2:1721
+496:2:1722
+497:2:1733
+498:2:1738
+499:2:1739
+500:2:1750
+501:2:1751
+502:2:1752
+503:2:1750
+504:2:1751
+505:2:1752
+506:2:1763
+507:0:2890
+508:2:1179
+509:0:2890
+510:2:1772
+511:2:1773
+512:2:1777
+513:2:1778
+514:2:1786
+515:2:1787
+516:2:1791
+517:2:1792
+518:2:1800
+519:2:1805
+520:2:1809
+521:2:1810
+522:2:1818
+523:2:1819
+524:2:1823
+525:2:1824
+526:2:1818
+527:2:1819
+528:2:1823
+529:2:1824
+530:2:1832
+531:2:1837
+532:2:1838
+533:2:1849
+534:2:1850
+535:2:1851
+536:2:1862
+537:2:1867
+538:2:1868
+539:2:1879
+540:2:1880
+541:2:1881
+542:2:1879
+543:2:1880
+544:2:1881
+545:2:1892
+546:2:1899
+547:0:2890
+548:2:1179
+549:0:2890
+550:2:1903
+551:2:1904
+552:2:1905
+553:2:1917
+554:2:1918
+555:2:1922
+556:2:1923
+557:2:1931
+558:2:1936
+559:2:1940
+560:2:1941
+561:2:1949
+562:2:1950
+563:2:1954
+564:2:1955
+565:2:1949
+566:2:1950
+567:2:1954
+568:2:1955
+569:2:1963
+570:2:1968
+571:2:1969
+572:2:1980
+573:2:1981
+574:2:1982
+575:2:1993
+576:2:1998
+577:2:1999
+578:2:2010
+579:2:2011
+580:2:2012
+581:2:2010
+582:2:2011
+583:2:2012
+584:2:2023
+585:2:2033
+586:2:2034
+587:0:2890
+588:2:1179
+589:0:2890
+590:2:2043
+591:2:2044
+592:0:2890
+593:2:1179
+594:0:2890
+595:2:2048
+596:0:2890
+597:2:2056
+598:0:2890
+599:2:1174
+600:0:2890
+601:2:1176
+602:0:2890
+603:2:1177
+604:0:2890
+605:2:1178
+606:0:2890
+607:2:1179
+608:0:2890
+609:2:1180
+610:2:1181
+611:2:1185
+612:2:1186
+613:2:1194
+614:2:1195
+615:2:1199
+616:2:1200
+617:2:1208
+618:2:1213
+619:2:1217
+620:2:1218
+621:2:1226
+622:2:1227
+623:2:1231
+624:2:1232
+625:2:1226
+626:2:1227
+627:2:1228
+628:2:1240
+629:2:1245
+630:2:1246
+631:2:1257
+632:2:1258
+633:2:1259
+634:2:1270
+635:2:1275
+636:2:1276
+637:2:1287
+638:2:1288
+639:2:1289
+640:2:1287
+641:2:1288
+642:2:1289
+643:2:1300
+644:2:1308
+645:0:2890
+646:2:1179
+647:0:2890
+648:2:1312
+649:2:1316
+650:2:1317
+651:2:1321
+652:2:1325
+653:2:1326
+654:2:1330
+655:2:1338
+656:2:1339
+657:2:1343
+658:2:1347
+659:2:1348
+660:2:1343
+661:2:1344
+662:2:1352
+663:0:2890
+664:2:1179
+665:0:2890
+666:2:1360
+667:2:1361
+668:2:1362
+669:0:2890
+670:2:1179
+671:0:2890
+672:2:1370
+673:0:2890
+674:2:1179
+675:0:2890
+676:2:1373
+677:2:1374
+678:2:1378
+679:2:1379
+680:2:1387
+681:2:1388
+682:2:1392
+683:2:1393
+684:2:1401
+685:2:1406
+686:2:1407
+687:2:1419
+688:2:1420
+689:2:1424
+690:2:1425
+691:2:1419
+692:2:1420
+693:2:1424
+694:2:1425
+695:2:1433
+696:2:1438
+697:2:1439
+698:2:1450
+699:2:1451
+700:2:1452
+701:2:1463
+702:2:1468
+703:2:1469
+704:2:1480
+705:2:1481
+706:2:1482
+707:2:1480
+708:2:1481
+709:2:1482
+710:2:1493
+711:2:1500
+712:0:2890
+713:2:1179
+714:0:2890
+715:2:1504
+716:2:1505
+717:2:1506
+718:2:1518
+719:2:1519
+720:2:1523
+721:2:1524
+722:2:1532
+723:2:1537
+724:2:1541
+725:2:1542
+726:2:1550
+727:2:1551
+728:2:1555
+729:2:1556
+730:2:1550
+731:2:1551
+732:2:1555
+733:2:1556
+734:2:1564
+735:2:1569
+736:2:1570
+737:2:1581
+738:2:1582
+739:2:1583
+740:2:1594
+741:2:1599
+742:2:1600
+743:2:1611
+744:2:1612
+745:2:1613
+746:2:1611
+747:2:1612
+748:2:1613
+749:2:1624
+750:2:1635
+751:2:1636
+752:0:2890
+753:2:1179
+754:0:2890
+755:2:1643
+756:2:1644
+757:2:1648
+758:2:1649
+759:2:1657
+760:2:1658
+761:2:1662
+762:2:1663
+763:2:1671
+764:2:1676
+765:2:1680
+766:2:1681
+767:2:1689
+768:2:1690
+769:2:1694
+770:2:1695
+771:2:1689
+772:2:1690
+773:2:1694
+774:2:1695
+775:2:1703
+776:2:1708
+777:2:1709
+778:2:1720
+779:2:1721
+780:2:1722
+781:2:1733
+782:2:1738
+783:2:1739
+784:2:1750
+785:2:1751
+786:2:1752
+787:2:1750
+788:2:1751
+789:2:1752
+790:2:1763
+791:0:2890
+792:2:1179
+793:0:2890
+794:2:1903
+795:2:1904
+796:2:1908
+797:2:1909
+798:2:1917
+799:2:1918
+800:2:1922
+801:2:1923
+802:2:1931
+803:2:1936
+804:2:1940
+805:2:1941
+806:2:1949
+807:2:1950
+808:2:1954
+809:2:1955
+810:2:1949
+811:2:1950
+812:2:1954
+813:2:1955
+814:2:1963
+815:2:1968
+816:2:1969
+817:2:1980
+818:2:1981
+819:2:1982
+820:2:1993
+821:2:1998
+822:2:1999
+823:2:2010
+824:2:2011
+825:2:2012
+826:2:2010
+827:2:2011
+828:2:2012
+829:2:2023
+830:2:2033
+831:2:2034
+832:0:2890
+833:2:1179
+834:0:2890
+835:2:2043
+836:2:2044
+837:0:2890
+838:2:1179
+839:0:2890
+840:2:1772
+841:2:1773
+842:2:1777
+843:2:1778
+844:2:1786
+845:2:1787
+846:2:1791
+847:2:1792
+848:2:1800
+849:2:1805
+850:2:1809
+851:2:1810
+852:2:1818
+853:2:1819
+854:2:1820
+855:2:1818
+856:2:1819
+857:2:1823
+858:2:1824
+859:2:1832
+860:2:1837
+861:2:1838
+862:2:1849
+863:2:1850
+864:2:1851
+865:2:1862
+866:2:1867
+867:2:1868
+868:2:1879
+869:2:1880
+870:2:1881
+871:2:1879
+872:2:1880
+873:2:1881
+874:2:1892
+875:2:1899
+876:0:2890
+877:2:1179
+878:0:2890
+879:2:2048
+880:0:2890
+881:2:2056
+882:0:2890
+883:2:2057
+884:0:2890
+885:2:2062
+886:0:2890
+887:1:29
+888:0:2890
+889:2:2063
+890:0:2890
+891:1:35
+892:0:2890
+893:2:2062
+894:0:2890
+895:1:36
+896:0:2890
+897:2:2063
+898:0:2890
+899:1:37
+900:0:2890
+901:2:2062
+902:0:2890
+903:1:38
+904:0:2890
+905:2:2063
+906:0:2890
+907:1:39
+908:0:2890
+909:2:2062
+910:0:2890
+911:1:40
+912:0:2890
+913:2:2063
+914:0:2890
+915:1:41
+916:0:2890
+917:2:2062
+918:0:2890
+919:1:42
+920:0:2890
+921:2:2063
+922:0:2890
+923:1:43
+924:0:2890
+925:2:2062
+926:0:2890
+927:1:44
+928:0:2890
+929:2:2063
+930:0:2890
+931:1:145
+932:0:2890
+933:2:2062
+934:0:2890
+935:1:147
+936:0:2890
+937:2:2063
+938:0:2890
+939:1:46
+940:0:2890
+941:2:2062
+942:0:2890
+943:1:153
+944:1:154
+945:1:158
+946:1:159
+947:1:167
+948:1:168
+949:1:172
+950:1:173
+951:1:181
+952:1:186
+953:1:190
+954:1:191
+955:1:199
+956:1:200
+957:1:204
+958:1:205
+959:1:199
+960:1:200
+961:1:204
+962:1:205
+963:1:213
+964:1:218
+965:1:219
+966:1:230
+967:1:231
+968:1:232
+969:1:243
+970:1:255
+971:1:256
+972:1:260
+973:1:261
+974:1:262
+975:1:260
+976:1:261
+977:1:262
+978:1:273
+979:0:2890
+980:2:2063
+981:0:2890
+982:1:42
+983:0:2890
+984:2:2062
+985:0:2890
+986:1:43
+987:0:2890
+988:2:2063
+989:0:2890
+990:1:44
+991:0:2890
+992:2:2062
+993:0:2890
+994:1:145
+995:0:2890
+996:2:2063
+997:0:2890
+998:1:147
+999:0:2890
+1000:2:2062
+1001:0:2890
+1002:1:46
+1003:0:2890
+1004:2:2063
+1005:0:2890
+1006:1:282
+1007:1:283
+1008:0:2890
+1009:2:2062
+1010:0:2890
+1011:1:42
+1012:0:2890
+1013:2:2063
+1014:0:2890
+1015:1:43
+1016:0:2890
+1017:2:2062
+1018:0:2890
+1019:1:44
+1020:0:2890
+1021:2:2063
+1022:0:2890
+1023:1:145
+1024:0:2890
+1025:2:2062
+1026:0:2890
+1027:1:147
+1028:0:2890
+1029:2:2063
+1030:0:2890
+1031:1:46
+1032:0:2890
+1033:2:2062
+1034:0:2890
+1035:1:289
+1036:1:290
+1037:1:294
+1038:1:295
+1039:1:303
+1040:1:304
+1041:1:308
+1042:1:309
+1043:1:317
+1044:1:322
+1045:1:326
+1046:1:327
+1047:1:335
+1048:1:336
+1049:1:340
+1050:1:341
+1051:1:335
+1052:1:336
+1053:1:340
+1054:1:341
+1055:1:349
+1056:1:354
+1057:1:355
+1058:1:366
+1059:1:367
+1060:1:368
+1061:1:379
+1062:1:391
+1063:1:392
+1064:1:396
+1065:1:397
+1066:1:398
+1067:1:396
+1068:1:397
+1069:1:398
+1070:1:409
+1071:0:2890
+1072:2:2063
+1073:0:2890
+1074:1:42
+1075:0:2890
+1076:2:2062
+1077:0:2890
+1078:1:43
+1079:0:2890
+1080:2:2063
+1081:0:2890
+1082:1:44
+1083:0:2890
+1084:2:2062
+1085:0:2890
+1086:1:145
+1087:0:2890
+1088:2:2063
+1089:0:2890
+1090:1:147
+1091:0:2890
+1092:2:2062
+1093:0:2890
+1094:1:46
+1095:0:2890
+1096:2:2063
+1097:0:2890
+1098:1:418
+1099:1:419
+1100:1:423
+1101:1:424
+1102:1:432
+1103:1:433
+1104:1:437
+1105:1:438
+1106:1:446
+1107:1:451
+1108:1:455
+1109:1:456
+1110:1:464
+1111:1:465
+1112:1:469
+1113:1:470
+1114:1:464
+1115:1:465
+1116:1:469
+1117:1:470
+1118:1:478
+1119:1:483
+1120:1:484
+1121:1:495
+1122:1:496
+1123:1:497
+1124:1:508
+1125:1:520
+1126:1:521
+1127:1:525
+1128:1:526
+1129:1:527
+1130:1:525
+1131:1:526
+1132:1:527
+1133:1:538
+1134:1:545
+1135:0:2890
+1136:2:2062
+1137:0:2890
+1138:1:42
+1139:0:2890
+1140:2:2063
+1141:0:2890
+1142:1:43
+1143:0:2890
+1144:2:2062
+1145:0:2890
+1146:1:44
+1147:0:2890
+1148:2:2063
+1149:0:2890
+1150:1:145
+1151:0:2890
+1152:2:2062
+1153:0:2890
+1154:1:147
+1155:0:2890
+1156:2:2063
+1157:0:2890
+1158:1:46
+1159:0:2890
+1160:2:2062
+1161:0:2890
+1162:1:683
+1163:1:684
+1164:1:688
+1165:1:689
+1166:1:697
+1167:1:698
+1168:1:699
+1169:1:711
+1170:1:716
+1171:1:720
+1172:1:721
+1173:1:729
+1174:1:730
+1175:1:734
+1176:1:735
+1177:1:729
+1178:1:730
+1179:1:734
+1180:1:735
+1181:1:743
+1182:1:748
+1183:1:749
+1184:1:760
+1185:1:761
+1186:1:762
+1187:1:773
+1188:1:785
+1189:1:786
+1190:1:790
+1191:1:791
+1192:1:792
+1193:1:790
+1194:1:791
+1195:1:792
+1196:1:803
+1197:0:2890
+1198:2:2063
+1199:0:2890
+1200:1:42
+1201:0:2890
+1202:2:2062
+1203:0:2890
+1204:1:43
+1205:0:2890
+1206:2:2063
+1207:0:2890
+1208:1:44
+1209:0:2890
+1210:2:2062
+1211:0:2890
+1212:1:145
+1213:0:2890
+1214:2:2063
+1215:0:2890
+1216:1:147
+1217:0:2890
+1218:2:2062
+1219:0:2890
+1220:1:46
+1221:0:2890
+1222:2:2063
+1223:0:2890
+1224:1:812
+1225:0:2890
+1226:2:2062
+1227:0:2890
+1228:1:1089
+1229:1:1096
+1230:1:1097
+1231:1:1104
+1232:1:1109
+1233:1:1116
+1234:1:1117
+1235:1:1116
+1236:1:1117
+1237:1:1124
+1238:1:1128
+1239:0:2890
+1240:2:2063
+1241:0:2890
+1242:1:814
+1243:1:815
+1244:0:2888
+1245:2:2062
+1246:0:2894
+1247:1:1049
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.log
new file mode 100644 (file)
index 0000000..93939dd
--- /dev/null
@@ -0,0 +1,329 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_rmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1179)
+pan: claim violated! (at depth 1476)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 3979, errors: 1
+   430584 states, stored
+ 99483765 states, matched
+ 99914349 transitions (= stored+matched)
+5.7035561e+08 atomic steps
+hash conflicts:  13587432 (resolved)
+
+Stats on memory usage (in Megabytes):
+   47.634      equivalent memory usage for states (stored*(State-vector + overhead))
+   20.652      actual memory usage for states (compression: 43.36%)
+               state-vector as stored = 14 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  486.369      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 7914 682 569 2 2 ]
+unreached in proctype urcu_reader
+       line 713, "pan.___", state 12, "((i<1))"
+       line 713, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 102, "(1)"
+       line 249, "pan.___", state 110, "(1)"
+       line 253, "pan.___", state 122, "(1)"
+       line 257, "pan.___", state 130, "(1)"
+       line 404, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 221, "(1)"
+       line 431, "pan.___", state 251, "(1)"
+       line 435, "pan.___", state 264, "(1)"
+       line 614, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 404, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 357, "(1)"
+       line 431, "pan.___", state 387, "(1)"
+       line 435, "pan.___", state 400, "(1)"
+       line 404, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 486, "(1)"
+       line 431, "pan.___", state 516, "(1)"
+       line 435, "pan.___", state 529, "(1)"
+       line 404, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 554, "(1)"
+       line 404, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 555, "else"
+       line 404, "pan.___", state 558, "(1)"
+       line 408, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 568, "(1)"
+       line 408, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 569, "else"
+       line 408, "pan.___", state 572, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 406, "pan.___", state 578, "((i<1))"
+       line 406, "pan.___", state 578, "((i>=1))"
+       line 413, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 586, "(1)"
+       line 413, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 587, "else"
+       line 413, "pan.___", state 590, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 417, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 600, "(1)"
+       line 417, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 601, "else"
+       line 417, "pan.___", state 604, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 415, "pan.___", state 610, "((i<2))"
+       line 415, "pan.___", state 610, "((i>=2))"
+       line 422, "pan.___", state 617, "(1)"
+       line 422, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 618, "else"
+       line 422, "pan.___", state 621, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 426, "pan.___", state 630, "(1)"
+       line 426, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 631, "else"
+       line 426, "pan.___", state 634, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 424, "pan.___", state 640, "((i<1))"
+       line 424, "pan.___", state 640, "((i>=1))"
+       line 431, "pan.___", state 647, "(1)"
+       line 431, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 648, "else"
+       line 431, "pan.___", state 651, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 435, "pan.___", state 660, "(1)"
+       line 435, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 661, "else"
+       line 435, "pan.___", state 664, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 433, "pan.___", state 670, "((i<2))"
+       line 433, "pan.___", state 670, "((i>=2))"
+       line 443, "pan.___", state 674, "(1)"
+       line 443, "pan.___", state 674, "(1)"
+       line 614, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 614, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 614, "pan.___", state 679, "(1)"
+       line 404, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 751, "(1)"
+       line 431, "pan.___", state 781, "(1)"
+       line 435, "pan.___", state 794, "(1)"
+       line 404, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 886, "(1)"
+       line 431, "pan.___", state 916, "(1)"
+       line 435, "pan.___", state 929, "(1)"
+       line 404, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1015, "(1)"
+       line 431, "pan.___", state 1045, "(1)"
+       line 435, "pan.___", state 1058, "(1)"
+       line 404, "pan.___", state 1091, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 1123, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 1137, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1156, "(1)"
+       line 431, "pan.___", state 1186, "(1)"
+       line 435, "pan.___", state 1199, "(1)"
+       line 748, "pan.___", state 1220, "-end-"
+       (95 of 1220 states)
+unreached in proctype urcu_writer
+       line 837, "pan.___", state 12, "((i<1))"
+       line 837, "pan.___", state 12, "((i>=1))"
+       line 404, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 51, "(1)"
+       line 408, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 65, "(1)"
+       line 408, "pan.___", state 66, "(1)"
+       line 408, "pan.___", state 66, "(1)"
+       line 406, "pan.___", state 71, "((i<1))"
+       line 406, "pan.___", state 71, "((i>=1))"
+       line 413, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 83, "(1)"
+       line 413, "pan.___", state 84, "(1)"
+       line 413, "pan.___", state 84, "(1)"
+       line 417, "pan.___", state 97, "(1)"
+       line 417, "pan.___", state 98, "(1)"
+       line 417, "pan.___", state 98, "(1)"
+       line 415, "pan.___", state 103, "((i<2))"
+       line 415, "pan.___", state 103, "((i>=2))"
+       line 422, "pan.___", state 110, "(1)"
+       line 422, "pan.___", state 111, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 111, "else"
+       line 422, "pan.___", state 114, "(1)"
+       line 422, "pan.___", state 115, "(1)"
+       line 422, "pan.___", state 115, "(1)"
+       line 426, "pan.___", state 123, "(1)"
+       line 426, "pan.___", state 124, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 124, "else"
+       line 426, "pan.___", state 127, "(1)"
+       line 426, "pan.___", state 128, "(1)"
+       line 426, "pan.___", state 128, "(1)"
+       line 424, "pan.___", state 133, "((i<1))"
+       line 424, "pan.___", state 133, "((i>=1))"
+       line 431, "pan.___", state 140, "(1)"
+       line 431, "pan.___", state 141, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 141, "else"
+       line 431, "pan.___", state 144, "(1)"
+       line 431, "pan.___", state 145, "(1)"
+       line 431, "pan.___", state 145, "(1)"
+       line 435, "pan.___", state 153, "(1)"
+       line 435, "pan.___", state 154, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 154, "else"
+       line 435, "pan.___", state 157, "(1)"
+       line 435, "pan.___", state 158, "(1)"
+       line 435, "pan.___", state 158, "(1)"
+       line 433, "pan.___", state 163, "((i<2))"
+       line 433, "pan.___", state 163, "((i>=2))"
+       line 443, "pan.___", state 167, "(1)"
+       line 443, "pan.___", state 167, "(1)"
+       line 268, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 404, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 303, "(1)"
+       line 426, "pan.___", state 316, "(1)"
+       line 431, "pan.___", state 333, "(1)"
+       line 435, "pan.___", state 346, "(1)"
+       line 408, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 447, "(1)"
+       line 431, "pan.___", state 464, "(1)"
+       line 435, "pan.___", state 477, "(1)"
+       line 408, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 586, "(1)"
+       line 431, "pan.___", state 603, "(1)"
+       line 435, "pan.___", state 616, "(1)"
+       line 408, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 715, "(1)"
+       line 431, "pan.___", state 732, "(1)"
+       line 435, "pan.___", state 745, "(1)"
+       line 408, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 846, "(1)"
+       line 431, "pan.___", state 863, "(1)"
+       line 435, "pan.___", state 876, "(1)"
+       line 268, "pan.___", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 955, "(1)"
+       line 280, "pan.___", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 978, "(1)"
+       line 249, "pan.___", state 986, "(1)"
+       line 253, "pan.___", state 998, "(1)"
+       line 257, "pan.___", state 1006, "(1)"
+       line 268, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1084, "(1)"
+       line 249, "pan.___", state 1092, "(1)"
+       line 253, "pan.___", state 1104, "(1)"
+       line 257, "pan.___", state 1112, "(1)"
+       line 272, "pan.___", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1176, "(1)"
+       line 249, "pan.___", state 1184, "(1)"
+       line 253, "pan.___", state 1196, "(1)"
+       line 257, "pan.___", state 1204, "(1)"
+       line 268, "pan.___", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1282, "(1)"
+       line 249, "pan.___", state 1290, "(1)"
+       line 253, "pan.___", state 1302, "(1)"
+       line 257, "pan.___", state 1310, "(1)"
+       line 272, "pan.___", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1374, "(1)"
+       line 249, "pan.___", state 1382, "(1)"
+       line 253, "pan.___", state 1394, "(1)"
+       line 257, "pan.___", state 1402, "(1)"
+       line 268, "pan.___", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1480, "(1)"
+       line 249, "pan.___", state 1488, "(1)"
+       line 253, "pan.___", state 1500, "(1)"
+       line 257, "pan.___", state 1508, "(1)"
+       line 272, "pan.___", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1572, "(1)"
+       line 249, "pan.___", state 1580, "(1)"
+       line 253, "pan.___", state 1592, "(1)"
+       line 257, "pan.___", state 1600, "(1)"
+       line 268, "pan.___", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1678, "(1)"
+       line 249, "pan.___", state 1686, "(1)"
+       line 253, "pan.___", state 1698, "(1)"
+       line 257, "pan.___", state 1706, "(1)"
+       line 1123, "pan.___", state 1722, "-end-"
+       (129 of 1722 states)
+unreached in proctype :init:
+       line 1138, "pan.___", state 11, "((i<1))"
+       line 1138, "pan.___", state 11, "((i>=1))"
+       (1 of 26 states)
+unreached in proctype :never:
+       line 1184, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 155 seconds
+pan: rate 2784.0683 states/second
+pan: avg transition delay 1.5479e-06 usec
+cp .input.spin urcu_free_no_rmb.spin.input
+cp .input.spin.trail urcu_free_no_rmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.spin.input
new file mode 100644 (file)
index 0000000..44977a8
--- /dev/null
@@ -0,0 +1,1157 @@
+#define NO_RMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
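(Editorial note, not part of the model file: as a quick worked example of these two constants, RCU_GP_CTR_BIT is 1 << 7 = 0x80 and RCU_GP_CTR_NEST_MASK is 0x7f, so a reader's nesting count occupies the low 7 bits of its urcu_active_readers entry while bit 7 carries the grace-period phase the writer flips; the (tmp & RCU_GP_CTR_NEST_MASK) test in PROCEDURE_READ_LOCK below therefore detects an outermost read-side lock.)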
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
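(Editorial sketch, not part of the model file: the fragment below illustrates how these three macros encode a dependency between two instructions A and B inside a non-deterministic do-loop; because B consumes A's token, Spin can never schedule B before A. The names TOK_A, TOK_B and flow are hypothetical; the reader and writer bodies further down use the same pattern with the READ_*/WRITE_* tokens.)

	#define TOK_A	(1 << 0)
	#define TOK_B	(1 << 1)

	int flow = 0;	/* one-hot token word, like _proc_urcu_reader below */

	do
	:: CONSUME_TOKENS(flow, 0, TOK_A) ->		/* A has no prerequisite */
		/* ... effect of instruction A ... */
		PRODUCE_TOKENS(flow, TOK_A);
	:: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->	/* B: RAW dependency on A */
		/* ... effect of instruction B ... */
		PRODUCE_TOKENS(flow, TOK_B);
	:: CONSUME_TOKENS(flow, TOK_A | TOK_B, 0) ->	/* both executed: reset and exit */
		CLEAR_TOKENS(flow, TOK_A | TOK_B);
		break;
	od;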
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * however, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
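(Editorial sketch, not part of the model file: for a hypothetical variable x declared with DECLARE_CACHED_VAR and DECLARE_PROC_CACHED_VAR, the dirty bit drives the macros above as follows.)

	WRITE_CACHED_VAR(x, 1);			/* cached_x = 1, cache_dirty_x = 1        */
	CACHE_READ_FROM_MEM(x, get_pid());	/* skip: a dirty line is not refreshed    */
	CACHE_WRITE_TO_MEM(x, get_pid());	/* mem_x = cached_x, cache_dirty_x = 0    */
	CACHE_READ_FROM_MEM(x, get_pid());	/* clean again, so cached_x reloads mem_x */

(In the model these flushes are only reached through smp_wmb/smp_rmb, which force them, or through ooo_mem, which uses the RANDOM_* variants; that is what lets Spin explore weakly-ordered cache interleavings.)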
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
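(Editorial sketch, not part of the model file: the intended use of smp_wmb mirrors rcu_assign_pointer(): order the initialisation of a slab element before publishing its index. The element index 1 below is only illustrative; the actual writer proctype appears later in this file and drives the same steps through its WRITE_DATA, WRITE_PROC_WMB and WRITE_XCHG_PTR tokens.)

	WRITE_CACHED_VAR(rcu_data[1], WINE);	/* initialise the new element        */
	smp_wmb(i);				/* data store ordered before pointer */
	WRITE_CACHED_VAR(rcu_ptr, 1);		/* publish the element               */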
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
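(Editorial check, not part of the model file: with READ_LOCK_BASE == 1, the lock procedure's internal tokens READ_PROD_A_READ through READ_PROD_C_IF_TRUE_READ shifted by the base land on bits 1-4 and its output token READ_LOCK_OUT is bit 5, matching the "1 to 5" comment above; READ_UNLOCK_BASE == 17 places READ_PROC_READ_UNLOCK on bit 17 and READ_UNLOCK_OUT on bit 18; READ_PROC_ALL_TOKENS_CLEAR == (1 << 30) - 1 spans bits 0-29 and therefore clears every token, branch bits included, when a read-side pass completes.)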
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
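As a rough orientation (a sketch of the algorithm being modeled, not code
contained in this patch), the reader token sequence above corresponds to the
following user-level read side, with each statement decomposed into tokens so
the verifier can interleave and reorder them the way a weakly-ordered CPU
would:

	rcu_read_lock();                /* READ_LOCK_BASE / READ_LOCK_OUT tokens */
	p = rcu_dereference(rcu_ptr);   /* READ_PROC_READ_GEN, with rmb1 acting as
	                                   the smp_read_barrier_depends() step */
	v = rcu_data[p];                /* READ_PROC_ACCESS_GEN */
	rcu_read_unlock();              /* READ_UNLOCK_BASE / READ_UNLOCK_OUT tokens */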
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
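For reference, a worked reading of the clear mask above (an illustrative note,
not part of the patch): the writer tokens occupy bits 0 through 14, so

	WRITE_PROC_ALL_TOKENS_CLEAR = (1 << 15) - 1 = 0x7fff

clears every one of them, including WRITE_PROC_FIRST_WAIT_LOOP (bit 8) and
WRITE_PROC_SECOND_WAIT_LOOP (bit 12). Those two loop bits are deliberately
absent from WRITE_PROC_ALL_TOKENS, presumably because they are transient
wait-loop markers rather than completion tokens, so the end-of-round guard
must not require them.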
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep track of the current parity locally so
+                                * we do not add non-existing dependencies on
+                                * the global GP update. Needed to test the
+                                * single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with progress here so that, with weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_rmb.spin.input.trail
new file mode 100644 (file)
index 0000000..931028a
--- /dev/null
@@ -0,0 +1,1479 @@
+-2:3:-2
+-4:-4:-4
+1:0:2970
+2:2:1220
+3:2:1225
+4:2:1229
+5:2:1237
+6:2:1241
+7:2:1245
+8:0:2970
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:2970
+16:3:2942
+17:3:2945
+18:3:2950
+19:3:2957
+20:3:2960
+21:3:2964
+22:3:2965
+23:0:2970
+24:3:2967
+25:0:2970
+26:2:1249
+27:0:2970
+28:2:1255
+29:0:2970
+30:2:1256
+31:0:2970
+32:2:1258
+33:0:2970
+34:2:1259
+35:0:2970
+36:2:1260
+37:2:1261
+38:2:1265
+39:2:1266
+40:2:1274
+41:2:1275
+42:2:1279
+43:2:1280
+44:2:1288
+45:2:1293
+46:2:1297
+47:2:1298
+48:2:1306
+49:2:1307
+50:2:1311
+51:2:1312
+52:2:1306
+53:2:1307
+54:2:1311
+55:2:1312
+56:2:1320
+57:2:1325
+58:2:1326
+59:2:1337
+60:2:1338
+61:2:1339
+62:2:1350
+63:2:1355
+64:2:1356
+65:2:1367
+66:2:1368
+67:2:1369
+68:2:1367
+69:2:1368
+70:2:1369
+71:2:1380
+72:2:1388
+73:0:2970
+74:2:1259
+75:0:2970
+76:2:1392
+77:2:1396
+78:2:1397
+79:2:1401
+80:2:1405
+81:2:1406
+82:2:1410
+83:2:1418
+84:2:1419
+85:2:1423
+86:2:1427
+87:2:1428
+88:2:1423
+89:2:1424
+90:2:1432
+91:0:2970
+92:2:1259
+93:0:2970
+94:2:1440
+95:2:1441
+96:2:1442
+97:0:2970
+98:2:1259
+99:0:2970
+100:2:1447
+101:0:2970
+102:2:2151
+103:2:2152
+104:2:2156
+105:2:2160
+106:2:2161
+107:2:2165
+108:2:2170
+109:2:2178
+110:2:2182
+111:2:2183
+112:2:2178
+113:2:2182
+114:2:2183
+115:2:2187
+116:2:2194
+117:2:2201
+118:2:2202
+119:2:2209
+120:2:2214
+121:2:2221
+122:2:2222
+123:2:2221
+124:2:2222
+125:2:2229
+126:2:2233
+127:0:2970
+128:2:2238
+129:0:2970
+130:2:2239
+131:0:2970
+132:2:2240
+133:0:2970
+134:2:2241
+135:0:2970
+136:1:29
+137:0:2970
+138:2:2242
+139:0:2970
+140:1:35
+141:0:2970
+142:1:36
+143:0:2970
+144:2:2241
+145:0:2970
+146:1:37
+147:0:2970
+148:2:2242
+149:0:2970
+150:1:38
+151:0:2970
+152:2:2241
+153:0:2970
+154:1:39
+155:0:2970
+156:2:2242
+157:0:2970
+158:1:40
+159:0:2970
+160:1:41
+161:0:2970
+162:2:2241
+163:0:2970
+164:1:42
+165:0:2970
+166:2:2242
+167:0:2970
+168:1:51
+169:0:2970
+170:2:2241
+171:0:2970
+172:1:55
+173:1:56
+174:1:60
+175:1:64
+176:1:65
+177:1:69
+178:1:77
+179:1:78
+180:1:82
+181:1:86
+182:1:87
+183:1:82
+184:1:86
+185:1:87
+186:1:91
+187:1:98
+188:1:105
+189:1:106
+190:1:113
+191:1:118
+192:1:125
+193:1:126
+194:1:125
+195:1:126
+196:1:133
+197:1:137
+198:0:2970
+199:2:2242
+200:0:2970
+201:1:142
+202:0:2970
+203:2:2243
+204:0:2970
+205:2:2248
+206:0:2970
+207:2:2249
+208:0:2970
+209:2:2257
+210:2:2258
+211:2:2262
+212:2:2266
+213:2:2267
+214:2:2271
+215:2:2279
+216:2:2280
+217:2:2284
+218:2:2288
+219:2:2289
+220:2:2284
+221:2:2288
+222:2:2289
+223:2:2293
+224:2:2300
+225:2:2307
+226:2:2308
+227:2:2315
+228:2:2320
+229:2:2327
+230:2:2328
+231:2:2327
+232:2:2328
+233:2:2335
+234:2:2339
+235:0:2970
+236:2:1449
+237:2:2132
+238:0:2970
+239:2:1259
+240:0:2970
+241:2:1450
+242:0:2970
+243:2:1259
+244:0:2970
+245:2:1453
+246:2:1454
+247:2:1458
+248:2:1459
+249:2:1467
+250:2:1468
+251:2:1472
+252:2:1473
+253:2:1481
+254:2:1486
+255:2:1490
+256:2:1491
+257:2:1499
+258:2:1500
+259:2:1504
+260:2:1505
+261:2:1499
+262:2:1500
+263:2:1504
+264:2:1505
+265:2:1513
+266:2:1518
+267:2:1519
+268:2:1530
+269:2:1531
+270:2:1532
+271:2:1543
+272:2:1548
+273:2:1549
+274:2:1560
+275:2:1561
+276:2:1562
+277:2:1560
+278:2:1561
+279:2:1562
+280:2:1573
+281:2:1580
+282:0:2970
+283:2:1259
+284:0:2970
+285:2:1584
+286:2:1585
+287:2:1586
+288:2:1598
+289:2:1599
+290:2:1603
+291:2:1604
+292:2:1612
+293:2:1617
+294:2:1621
+295:2:1622
+296:2:1630
+297:2:1631
+298:2:1635
+299:2:1636
+300:2:1630
+301:2:1631
+302:2:1635
+303:2:1636
+304:2:1644
+305:2:1649
+306:2:1650
+307:2:1661
+308:2:1662
+309:2:1663
+310:2:1674
+311:2:1679
+312:2:1680
+313:2:1691
+314:2:1692
+315:2:1693
+316:2:1691
+317:2:1692
+318:2:1693
+319:2:1704
+320:2:1715
+321:2:1716
+322:0:2970
+323:2:1259
+324:0:2970
+325:2:1723
+326:2:1724
+327:2:1728
+328:2:1729
+329:2:1737
+330:2:1738
+331:2:1742
+332:2:1743
+333:2:1751
+334:2:1756
+335:2:1760
+336:2:1761
+337:2:1769
+338:2:1770
+339:2:1774
+340:2:1775
+341:2:1769
+342:2:1770
+343:2:1774
+344:2:1775
+345:2:1783
+346:2:1788
+347:2:1789
+348:2:1800
+349:2:1801
+350:2:1802
+351:2:1813
+352:2:1818
+353:2:1819
+354:2:1830
+355:2:1831
+356:2:1832
+357:2:1830
+358:2:1831
+359:2:1832
+360:2:1843
+361:0:2970
+362:2:1259
+363:0:2970
+364:2:1852
+365:2:1853
+366:2:1857
+367:2:1858
+368:2:1866
+369:2:1867
+370:2:1871
+371:2:1872
+372:2:1880
+373:2:1885
+374:2:1889
+375:2:1890
+376:2:1898
+377:2:1899
+378:2:1903
+379:2:1904
+380:2:1898
+381:2:1899
+382:2:1903
+383:2:1904
+384:2:1912
+385:2:1917
+386:2:1918
+387:2:1929
+388:2:1930
+389:2:1931
+390:2:1942
+391:2:1947
+392:2:1948
+393:2:1959
+394:2:1960
+395:2:1961
+396:2:1959
+397:2:1960
+398:2:1961
+399:2:1972
+400:2:1979
+401:0:2970
+402:2:1259
+403:0:2970
+404:2:1983
+405:2:1984
+406:2:1985
+407:2:1997
+408:2:1998
+409:2:2002
+410:2:2003
+411:2:2011
+412:2:2016
+413:2:2020
+414:2:2021
+415:2:2029
+416:2:2030
+417:2:2034
+418:2:2035
+419:2:2029
+420:2:2030
+421:2:2034
+422:2:2035
+423:2:2043
+424:2:2048
+425:2:2049
+426:2:2060
+427:2:2061
+428:2:2062
+429:2:2073
+430:2:2078
+431:2:2079
+432:2:2090
+433:2:2091
+434:2:2092
+435:2:2090
+436:2:2091
+437:2:2092
+438:2:2103
+439:2:2113
+440:2:2114
+441:0:2970
+442:2:1259
+443:0:2970
+444:2:2120
+445:0:2970
+446:2:2745
+447:2:2746
+448:2:2750
+449:2:2754
+450:2:2755
+451:2:2759
+452:2:2767
+453:2:2768
+454:2:2772
+455:2:2776
+456:2:2777
+457:2:2772
+458:2:2776
+459:2:2777
+460:2:2781
+461:2:2788
+462:2:2795
+463:2:2796
+464:2:2803
+465:2:2808
+466:2:2815
+467:2:2816
+468:2:2815
+469:2:2816
+470:2:2823
+471:2:2827
+472:0:2970
+473:2:2832
+474:0:2970
+475:2:2833
+476:0:2970
+477:2:2834
+478:0:2970
+479:2:2835
+480:0:2970
+481:1:51
+482:0:2970
+483:2:2836
+484:0:2970
+485:1:55
+486:1:56
+487:1:60
+488:1:64
+489:1:65
+490:1:69
+491:1:77
+492:1:78
+493:1:82
+494:1:86
+495:1:87
+496:1:82
+497:1:86
+498:1:87
+499:1:91
+500:1:98
+501:1:105
+502:1:106
+503:1:113
+504:1:118
+505:1:125
+506:1:126
+507:1:125
+508:1:126
+509:1:133
+510:1:137
+511:0:2970
+512:2:2835
+513:0:2970
+514:1:142
+515:0:2970
+516:2:2836
+517:0:2970
+518:2:2837
+519:0:2970
+520:2:2842
+521:0:2970
+522:2:2843
+523:0:2970
+524:2:2851
+525:2:2852
+526:2:2856
+527:2:2860
+528:2:2861
+529:2:2865
+530:2:2873
+531:2:2874
+532:2:2878
+533:2:2882
+534:2:2883
+535:2:2878
+536:2:2882
+537:2:2883
+538:2:2887
+539:2:2894
+540:2:2901
+541:2:2902
+542:2:2909
+543:2:2914
+544:2:2921
+545:2:2922
+546:2:2921
+547:2:2922
+548:2:2929
+549:2:2933
+550:0:2970
+551:2:2122
+552:2:2132
+553:0:2970
+554:2:1259
+555:0:2970
+556:2:2123
+557:2:2124
+558:0:2970
+559:2:1259
+560:0:2970
+561:2:2128
+562:0:2970
+563:2:2136
+564:0:2970
+565:2:1256
+566:0:2970
+567:2:1258
+568:0:2970
+569:2:1259
+570:0:2970
+571:2:1260
+572:2:1261
+573:2:1265
+574:2:1266
+575:2:1274
+576:2:1275
+577:2:1279
+578:2:1280
+579:2:1288
+580:2:1293
+581:2:1297
+582:2:1298
+583:2:1306
+584:2:1307
+585:2:1308
+586:2:1306
+587:2:1307
+588:2:1311
+589:2:1312
+590:2:1320
+591:2:1325
+592:2:1326
+593:2:1337
+594:2:1338
+595:2:1339
+596:2:1350
+597:2:1355
+598:2:1356
+599:2:1367
+600:2:1368
+601:2:1369
+602:2:1367
+603:2:1368
+604:2:1369
+605:2:1380
+606:2:1388
+607:0:2970
+608:2:1259
+609:0:2970
+610:2:1392
+611:2:1396
+612:2:1397
+613:2:1401
+614:2:1405
+615:2:1406
+616:2:1410
+617:2:1418
+618:2:1419
+619:2:1423
+620:2:1424
+621:2:1423
+622:2:1427
+623:2:1428
+624:2:1432
+625:0:2970
+626:2:1259
+627:0:2970
+628:2:1440
+629:2:1441
+630:2:1442
+631:0:2970
+632:2:1259
+633:0:2970
+634:2:1447
+635:0:2970
+636:2:2151
+637:2:2152
+638:2:2156
+639:2:2160
+640:2:2161
+641:2:2165
+642:2:2170
+643:2:2178
+644:2:2182
+645:2:2183
+646:2:2178
+647:2:2182
+648:2:2183
+649:2:2187
+650:2:2194
+651:2:2201
+652:2:2202
+653:2:2209
+654:2:2214
+655:2:2221
+656:2:2222
+657:2:2221
+658:2:2222
+659:2:2229
+660:2:2233
+661:0:2970
+662:2:2238
+663:0:2970
+664:2:2239
+665:0:2970
+666:2:2240
+667:0:2970
+668:2:2241
+669:0:2970
+670:1:51
+671:0:2970
+672:2:2242
+673:0:2970
+674:1:55
+675:1:56
+676:1:60
+677:1:64
+678:1:65
+679:1:69
+680:1:77
+681:1:78
+682:1:82
+683:1:86
+684:1:87
+685:1:82
+686:1:86
+687:1:87
+688:1:91
+689:1:98
+690:1:105
+691:1:106
+692:1:113
+693:1:118
+694:1:125
+695:1:126
+696:1:125
+697:1:126
+698:1:133
+699:1:137
+700:0:2970
+701:2:2241
+702:0:2970
+703:1:142
+704:0:2970
+705:2:2242
+706:0:2970
+707:2:2243
+708:0:2970
+709:2:2248
+710:0:2970
+711:2:2249
+712:0:2970
+713:2:2257
+714:2:2258
+715:2:2262
+716:2:2266
+717:2:2267
+718:2:2271
+719:2:2279
+720:2:2280
+721:2:2284
+722:2:2288
+723:2:2289
+724:2:2284
+725:2:2288
+726:2:2289
+727:2:2293
+728:2:2300
+729:2:2307
+730:2:2308
+731:2:2315
+732:2:2320
+733:2:2327
+734:2:2328
+735:2:2327
+736:2:2328
+737:2:2335
+738:2:2339
+739:0:2970
+740:2:1449
+741:2:2132
+742:0:2970
+743:2:1259
+744:0:2970
+745:2:1450
+746:0:2970
+747:2:1259
+748:0:2970
+749:2:1453
+750:2:1454
+751:2:1458
+752:2:1459
+753:2:1467
+754:2:1468
+755:2:1472
+756:2:1473
+757:2:1481
+758:2:1486
+759:2:1490
+760:2:1491
+761:2:1499
+762:2:1500
+763:2:1504
+764:2:1505
+765:2:1499
+766:2:1500
+767:2:1504
+768:2:1505
+769:2:1513
+770:2:1518
+771:2:1519
+772:2:1530
+773:2:1531
+774:2:1532
+775:2:1543
+776:2:1548
+777:2:1549
+778:2:1560
+779:2:1561
+780:2:1562
+781:2:1560
+782:2:1561
+783:2:1562
+784:2:1573
+785:2:1580
+786:0:2970
+787:2:1259
+788:0:2970
+789:2:1584
+790:2:1585
+791:2:1586
+792:2:1598
+793:2:1599
+794:2:1603
+795:2:1604
+796:2:1612
+797:2:1617
+798:2:1621
+799:2:1622
+800:2:1630
+801:2:1631
+802:2:1635
+803:2:1636
+804:2:1630
+805:2:1631
+806:2:1635
+807:2:1636
+808:2:1644
+809:2:1649
+810:2:1650
+811:2:1661
+812:2:1662
+813:2:1663
+814:2:1674
+815:2:1679
+816:2:1680
+817:2:1691
+818:2:1692
+819:2:1693
+820:2:1691
+821:2:1692
+822:2:1693
+823:2:1704
+824:2:1715
+825:2:1716
+826:0:2970
+827:2:1259
+828:0:2970
+829:2:1723
+830:2:1724
+831:2:1728
+832:2:1729
+833:2:1737
+834:2:1738
+835:2:1742
+836:2:1743
+837:2:1751
+838:2:1756
+839:2:1760
+840:2:1761
+841:2:1769
+842:2:1770
+843:2:1774
+844:2:1775
+845:2:1769
+846:2:1770
+847:2:1774
+848:2:1775
+849:2:1783
+850:2:1788
+851:2:1789
+852:2:1800
+853:2:1801
+854:2:1802
+855:2:1813
+856:2:1818
+857:2:1819
+858:2:1830
+859:2:1831
+860:2:1832
+861:2:1830
+862:2:1831
+863:2:1832
+864:2:1843
+865:0:2970
+866:2:1259
+867:0:2970
+868:2:1852
+869:2:1853
+870:2:1857
+871:2:1858
+872:2:1866
+873:2:1867
+874:2:1871
+875:2:1872
+876:2:1880
+877:2:1885
+878:2:1889
+879:2:1890
+880:2:1898
+881:2:1899
+882:2:1903
+883:2:1904
+884:2:1898
+885:2:1899
+886:2:1903
+887:2:1904
+888:2:1912
+889:2:1917
+890:2:1918
+891:2:1929
+892:2:1930
+893:2:1931
+894:2:1942
+895:2:1947
+896:2:1948
+897:2:1959
+898:2:1960
+899:2:1961
+900:2:1959
+901:2:1960
+902:2:1961
+903:2:1972
+904:2:1979
+905:0:2970
+906:2:1259
+907:0:2970
+908:2:1983
+909:2:1984
+910:2:1985
+911:2:1997
+912:2:1998
+913:2:2002
+914:2:2003
+915:2:2011
+916:2:2016
+917:2:2020
+918:2:2021
+919:2:2029
+920:2:2030
+921:2:2034
+922:2:2035
+923:2:2029
+924:2:2030
+925:2:2034
+926:2:2035
+927:2:2043
+928:2:2048
+929:2:2049
+930:2:2060
+931:2:2061
+932:2:2062
+933:2:2073
+934:2:2078
+935:2:2079
+936:2:2090
+937:2:2091
+938:2:2092
+939:2:2090
+940:2:2091
+941:2:2092
+942:2:2103
+943:2:2113
+944:2:2114
+945:0:2970
+946:2:1259
+947:0:2970
+948:2:2120
+949:0:2970
+950:2:2745
+951:2:2746
+952:2:2750
+953:2:2754
+954:2:2755
+955:2:2759
+956:2:2767
+957:2:2768
+958:2:2772
+959:2:2776
+960:2:2777
+961:2:2772
+962:2:2776
+963:2:2777
+964:2:2781
+965:2:2788
+966:2:2795
+967:2:2796
+968:2:2803
+969:2:2808
+970:2:2815
+971:2:2816
+972:2:2815
+973:2:2816
+974:2:2823
+975:2:2827
+976:0:2970
+977:2:2832
+978:0:2970
+979:2:2833
+980:0:2970
+981:2:2834
+982:0:2970
+983:2:2835
+984:0:2970
+985:1:51
+986:0:2970
+987:2:2836
+988:0:2970
+989:1:55
+990:1:56
+991:1:60
+992:1:64
+993:1:65
+994:1:69
+995:1:77
+996:1:78
+997:1:82
+998:1:86
+999:1:87
+1000:1:82
+1001:1:86
+1002:1:87
+1003:1:91
+1004:1:98
+1005:1:105
+1006:1:106
+1007:1:113
+1008:1:118
+1009:1:125
+1010:1:126
+1011:1:125
+1012:1:126
+1013:1:133
+1014:1:137
+1015:0:2970
+1016:2:2835
+1017:0:2970
+1018:1:142
+1019:0:2970
+1020:2:2836
+1021:0:2970
+1022:2:2837
+1023:0:2970
+1024:2:2842
+1025:0:2970
+1026:2:2843
+1027:0:2970
+1028:2:2851
+1029:2:2852
+1030:2:2856
+1031:2:2860
+1032:2:2861
+1033:2:2865
+1034:2:2873
+1035:2:2874
+1036:2:2878
+1037:2:2882
+1038:2:2883
+1039:2:2878
+1040:2:2882
+1041:2:2883
+1042:2:2887
+1043:2:2894
+1044:2:2901
+1045:2:2902
+1046:2:2909
+1047:2:2914
+1048:2:2921
+1049:2:2922
+1050:2:2921
+1051:2:2922
+1052:2:2929
+1053:2:2933
+1054:0:2970
+1055:2:2122
+1056:2:2132
+1057:0:2970
+1058:2:1259
+1059:0:2970
+1060:2:2123
+1061:2:2124
+1062:0:2970
+1063:2:1259
+1064:0:2970
+1065:2:2128
+1066:0:2970
+1067:2:2136
+1068:0:2970
+1069:2:1256
+1070:0:2970
+1071:2:1258
+1072:0:2970
+1073:2:1259
+1074:0:2970
+1075:2:1260
+1076:2:1261
+1077:2:1265
+1078:2:1266
+1079:2:1274
+1080:2:1275
+1081:2:1279
+1082:2:1280
+1083:2:1288
+1084:2:1293
+1085:2:1297
+1086:2:1298
+1087:2:1306
+1088:2:1307
+1089:2:1311
+1090:2:1312
+1091:2:1306
+1092:2:1307
+1093:2:1308
+1094:2:1320
+1095:2:1325
+1096:2:1326
+1097:2:1337
+1098:2:1338
+1099:2:1339
+1100:2:1350
+1101:2:1355
+1102:2:1356
+1103:2:1367
+1104:2:1368
+1105:2:1369
+1106:2:1367
+1107:2:1368
+1108:2:1369
+1109:2:1380
+1110:2:1388
+1111:0:2970
+1112:2:1259
+1113:0:2970
+1114:1:143
+1115:0:2970
+1116:1:145
+1117:0:2970
+1118:1:44
+1119:0:2970
+1120:1:151
+1121:1:152
+1122:1:156
+1123:1:157
+1124:1:165
+1125:1:166
+1126:1:170
+1127:1:171
+1128:1:179
+1129:1:184
+1130:1:188
+1131:1:189
+1132:1:197
+1133:1:198
+1134:1:202
+1135:1:203
+1136:1:197
+1137:1:198
+1138:1:202
+1139:1:203
+1140:1:211
+1141:1:216
+1142:1:217
+1143:1:228
+1144:1:229
+1145:1:230
+1146:1:241
+1147:1:246
+1148:1:247
+1149:1:258
+1150:1:259
+1151:1:260
+1152:1:258
+1153:1:259
+1154:1:260
+1155:1:271
+1156:0:2970
+1157:1:40
+1158:0:2970
+1159:1:41
+1160:0:2970
+1161:2:1392
+1162:2:1396
+1163:2:1397
+1164:2:1401
+1165:2:1405
+1166:2:1406
+1167:2:1410
+1168:2:1418
+1169:2:1419
+1170:2:1423
+1171:2:1427
+1172:2:1428
+1173:2:1423
+1174:2:1424
+1175:2:1432
+1176:0:2970
+1177:2:1259
+1178:0:2970
+1179:2:1440
+1180:2:1441
+1181:2:1442
+1182:0:2970
+1183:2:1259
+1184:0:2970
+1185:2:1447
+1186:0:2970
+1187:2:2151
+1188:2:2152
+1189:2:2156
+1190:2:2160
+1191:2:2161
+1192:2:2165
+1193:2:2170
+1194:2:2178
+1195:2:2182
+1196:2:2183
+1197:2:2178
+1198:2:2182
+1199:2:2183
+1200:2:2187
+1201:2:2194
+1202:2:2201
+1203:2:2202
+1204:2:2209
+1205:2:2214
+1206:2:2221
+1207:2:2222
+1208:2:2221
+1209:2:2222
+1210:2:2229
+1211:2:2233
+1212:0:2970
+1213:2:2238
+1214:0:2970
+1215:2:2239
+1216:0:2970
+1217:2:2240
+1218:0:2970
+1219:2:2241
+1220:0:2970
+1221:1:42
+1222:0:2970
+1223:2:2242
+1224:0:2970
+1225:1:143
+1226:0:2970
+1227:1:145
+1228:0:2970
+1229:2:2241
+1230:0:2970
+1231:1:44
+1232:0:2970
+1233:2:2242
+1234:0:2970
+1235:1:280
+1236:1:281
+1237:0:2970
+1238:1:40
+1239:0:2970
+1240:1:41
+1241:0:2970
+1242:2:2241
+1243:0:2970
+1244:1:42
+1245:0:2970
+1246:2:2242
+1247:0:2970
+1248:1:143
+1249:0:2970
+1250:1:145
+1251:0:2970
+1252:2:2241
+1253:0:2970
+1254:1:44
+1255:0:2970
+1256:2:2242
+1257:0:2970
+1258:1:287
+1259:1:288
+1260:1:292
+1261:1:293
+1262:1:301
+1263:1:302
+1264:1:306
+1265:1:307
+1266:1:315
+1267:1:320
+1268:1:324
+1269:1:325
+1270:1:333
+1271:1:334
+1272:1:338
+1273:1:339
+1274:1:333
+1275:1:334
+1276:1:338
+1277:1:339
+1278:1:347
+1279:1:352
+1280:1:353
+1281:1:364
+1282:1:365
+1283:1:366
+1284:1:377
+1285:1:382
+1286:1:383
+1287:1:394
+1288:1:395
+1289:1:396
+1290:1:394
+1291:1:402
+1292:1:403
+1293:1:407
+1294:0:2970
+1295:1:40
+1296:0:2970
+1297:1:41
+1298:0:2970
+1299:2:2241
+1300:0:2970
+1301:1:42
+1302:0:2970
+1303:2:2242
+1304:0:2970
+1305:1:143
+1306:0:2970
+1307:1:145
+1308:0:2970
+1309:2:2241
+1310:0:2970
+1311:1:44
+1312:0:2970
+1313:2:2242
+1314:0:2970
+1315:1:416
+1316:1:417
+1317:1:421
+1318:1:422
+1319:1:430
+1320:1:431
+1321:1:435
+1322:1:436
+1323:1:444
+1324:1:449
+1325:1:453
+1326:1:454
+1327:1:462
+1328:1:463
+1329:1:467
+1330:1:468
+1331:1:462
+1332:1:463
+1333:1:467
+1334:1:468
+1335:1:476
+1336:1:481
+1337:1:482
+1338:1:493
+1339:1:494
+1340:1:495
+1341:1:506
+1342:1:511
+1343:1:512
+1344:1:523
+1345:1:524
+1346:1:525
+1347:1:523
+1348:1:531
+1349:1:532
+1350:1:536
+1351:1:543
+1352:0:2970
+1353:1:40
+1354:0:2970
+1355:1:41
+1356:0:2970
+1357:2:2241
+1358:0:2970
+1359:1:42
+1360:0:2970
+1361:2:2242
+1362:0:2970
+1363:1:143
+1364:0:2970
+1365:1:145
+1366:0:2970
+1367:2:2241
+1368:0:2970
+1369:1:44
+1370:0:2970
+1371:2:2242
+1372:0:2970
+1373:1:681
+1374:1:682
+1375:1:686
+1376:1:687
+1377:1:695
+1378:1:696
+1379:1:697
+1380:1:709
+1381:1:714
+1382:1:718
+1383:1:719
+1384:1:727
+1385:1:728
+1386:1:732
+1387:1:733
+1388:1:727
+1389:1:728
+1390:1:732
+1391:1:733
+1392:1:741
+1393:1:746
+1394:1:747
+1395:1:758
+1396:1:759
+1397:1:760
+1398:1:771
+1399:1:776
+1400:1:777
+1401:1:788
+1402:1:789
+1403:1:790
+1404:1:788
+1405:1:796
+1406:1:797
+1407:1:801
+1408:0:2970
+1409:1:40
+1410:0:2970
+1411:1:41
+1412:0:2970
+1413:2:2241
+1414:0:2970
+1415:1:42
+1416:0:2970
+1417:2:2242
+1418:0:2970
+1419:1:143
+1420:0:2970
+1421:1:145
+1422:0:2970
+1423:2:2241
+1424:0:2970
+1425:1:44
+1426:0:2970
+1427:2:2242
+1428:0:2970
+1429:1:810
+1430:0:2970
+1431:2:2241
+1432:0:2970
+1433:1:1087
+1434:1:1091
+1435:1:1092
+1436:1:1100
+1437:1:1101
+1438:1:1105
+1439:1:1106
+1440:1:1114
+1441:1:1119
+1442:1:1123
+1443:1:1124
+1444:1:1132
+1445:1:1133
+1446:1:1137
+1447:1:1138
+1448:1:1132
+1449:1:1133
+1450:1:1137
+1451:1:1138
+1452:1:1146
+1453:1:1151
+1454:1:1152
+1455:1:1163
+1456:1:1164
+1457:1:1165
+1458:1:1176
+1459:1:1181
+1460:1:1182
+1461:1:1193
+1462:1:1194
+1463:1:1195
+1464:1:1193
+1465:1:1201
+1466:1:1202
+1467:1:1206
+1468:1:1210
+1469:0:2970
+1470:2:2242
+1471:0:2970
+1472:1:812
+1473:1:813
+1474:0:2968
+1475:1:40
+1476:0:2974
+1477:1:1060
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.log
new file mode 100644 (file)
index 0000000..deef6fa
--- /dev/null
@@ -0,0 +1,298 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_wmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1179)
+Depth=    3829 States=    1e+06 Transitions= 2.15e+08 Memory=   512.834        t=    341 R=   3e+03
+Depth=    3829 States=    2e+06 Transitions= 5.61e+08 Memory=   559.026        t=    920 R=   2e+03
+pan: claim violated! (at depth 1358)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 3829, errors: 1
+  2932262 states, stored
+7.8211443e+08 states, matched
+7.850467e+08 transitions (= stored+matched)
+4.5802467e+09 atomic steps
+hash conflicts: 5.9075827e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  324.385      equivalent memory usage for states (stored*(State-vector + overhead))
+  136.208      actual memory usage for states (compression: 41.99%)
+               state-vector as stored = 13 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  601.897      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 22861 1831 2500 2 2 ]
+unreached in proctype urcu_reader
+       line 713, "pan.___", state 12, "((i<1))"
+       line 713, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 102, "(1)"
+       line 249, "pan.___", state 110, "(1)"
+       line 253, "pan.___", state 122, "(1)"
+       line 257, "pan.___", state 130, "(1)"
+       line 404, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 221, "(1)"
+       line 431, "pan.___", state 251, "(1)"
+       line 435, "pan.___", state 264, "(1)"
+       line 614, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 404, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 357, "(1)"
+       line 431, "pan.___", state 387, "(1)"
+       line 435, "pan.___", state 400, "(1)"
+       line 404, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 486, "(1)"
+       line 431, "pan.___", state 516, "(1)"
+       line 435, "pan.___", state 529, "(1)"
+       line 404, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 554, "(1)"
+       line 404, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 555, "else"
+       line 404, "pan.___", state 558, "(1)"
+       line 408, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 568, "(1)"
+       line 408, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 569, "else"
+       line 408, "pan.___", state 572, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 406, "pan.___", state 578, "((i<1))"
+       line 406, "pan.___", state 578, "((i>=1))"
+       line 413, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 586, "(1)"
+       line 413, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 587, "else"
+       line 413, "pan.___", state 590, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 417, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 600, "(1)"
+       line 417, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 601, "else"
+       line 417, "pan.___", state 604, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 415, "pan.___", state 610, "((i<2))"
+       line 415, "pan.___", state 610, "((i>=2))"
+       line 422, "pan.___", state 617, "(1)"
+       line 422, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 618, "else"
+       line 422, "pan.___", state 621, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 426, "pan.___", state 630, "(1)"
+       line 426, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 631, "else"
+       line 426, "pan.___", state 634, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 424, "pan.___", state 640, "((i<1))"
+       line 424, "pan.___", state 640, "((i>=1))"
+       line 431, "pan.___", state 647, "(1)"
+       line 431, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 648, "else"
+       line 431, "pan.___", state 651, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 435, "pan.___", state 660, "(1)"
+       line 435, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 661, "else"
+       line 435, "pan.___", state 664, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 433, "pan.___", state 670, "((i<2))"
+       line 433, "pan.___", state 670, "((i>=2))"
+       line 443, "pan.___", state 674, "(1)"
+       line 443, "pan.___", state 674, "(1)"
+       line 614, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 614, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 614, "pan.___", state 679, "(1)"
+       line 404, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 751, "(1)"
+       line 431, "pan.___", state 781, "(1)"
+       line 435, "pan.___", state 794, "(1)"
+       line 404, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 886, "(1)"
+       line 431, "pan.___", state 916, "(1)"
+       line 435, "pan.___", state 929, "(1)"
+       line 404, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1015, "(1)"
+       line 431, "pan.___", state 1045, "(1)"
+       line 435, "pan.___", state 1058, "(1)"
+       line 245, "pan.___", state 1091, "(1)"
+       line 253, "pan.___", state 1111, "(1)"
+       line 257, "pan.___", state 1119, "(1)"
+       line 748, "pan.___", state 1136, "-end-"
+       (92 of 1136 states)
+unreached in proctype urcu_writer
+       line 837, "pan.___", state 12, "((i<1))"
+       line 837, "pan.___", state 12, "((i>=1))"
+       line 404, "pan.___", state 46, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 60, "cache_dirty_urcu_active_readers = 0"
+       line 422, "pan.___", state 111, "(1)"
+       line 426, "pan.___", state 124, "(1)"
+       line 268, "pan.___", state 177, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 179, "(1)"
+       line 272, "pan.___", state 186, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 188, "(1)"
+       line 272, "pan.___", state 189, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 189, "else"
+       line 270, "pan.___", state 194, "((i<1))"
+       line 270, "pan.___", state 194, "((i>=1))"
+       line 276, "pan.___", state 199, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 201, "(1)"
+       line 276, "pan.___", state 202, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 202, "else"
+       line 280, "pan.___", state 208, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 210, "(1)"
+       line 280, "pan.___", state 211, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 211, "else"
+       line 285, "pan.___", state 220, "(cache_dirty_urcu_gp_ctr)"
+       line 285, "pan.___", state 220, "else"
+       line 404, "pan.___", state 239, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 253, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 271, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 285, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 304, "(1)"
+       line 426, "pan.___", state 317, "(1)"
+       line 431, "pan.___", state 334, "(1)"
+       line 435, "pan.___", state 347, "(1)"
+       line 408, "pan.___", state 384, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 402, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 416, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 448, "(1)"
+       line 431, "pan.___", state 465, "(1)"
+       line 435, "pan.___", state 478, "(1)"
+       line 408, "pan.___", state 523, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 541, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 555, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 587, "(1)"
+       line 431, "pan.___", state 604, "(1)"
+       line 435, "pan.___", state 617, "(1)"
+       line 408, "pan.___", state 652, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 670, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 684, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 716, "(1)"
+       line 431, "pan.___", state 733, "(1)"
+       line 435, "pan.___", state 746, "(1)"
+       line 408, "pan.___", state 783, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 801, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 815, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 847, "(1)"
+       line 431, "pan.___", state 864, "(1)"
+       line 435, "pan.___", state 877, "(1)"
+       line 268, "pan.___", state 932, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 941, "cache_dirty_urcu_active_readers = 0"
+       line 245, "pan.___", state 979, "(1)"
+       line 249, "pan.___", state 987, "(1)"
+       line 253, "pan.___", state 999, "(1)"
+       line 257, "pan.___", state 1007, "(1)"
+       line 268, "pan.___", state 1038, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1047, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1060, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1069, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1085, "(1)"
+       line 249, "pan.___", state 1093, "(1)"
+       line 253, "pan.___", state 1105, "(1)"
+       line 257, "pan.___", state 1113, "(1)"
+       line 272, "pan.___", state 1139, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1152, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1161, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1177, "(1)"
+       line 249, "pan.___", state 1185, "(1)"
+       line 253, "pan.___", state 1197, "(1)"
+       line 257, "pan.___", state 1205, "(1)"
+       line 268, "pan.___", state 1236, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1245, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1258, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1267, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1283, "(1)"
+       line 249, "pan.___", state 1291, "(1)"
+       line 253, "pan.___", state 1303, "(1)"
+       line 257, "pan.___", state 1311, "(1)"
+       line 272, "pan.___", state 1337, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1350, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1359, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1375, "(1)"
+       line 249, "pan.___", state 1383, "(1)"
+       line 253, "pan.___", state 1395, "(1)"
+       line 257, "pan.___", state 1403, "(1)"
+       line 268, "pan.___", state 1434, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1443, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1456, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1465, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1481, "(1)"
+       line 249, "pan.___", state 1489, "(1)"
+       line 253, "pan.___", state 1501, "(1)"
+       line 257, "pan.___", state 1509, "(1)"
+       line 272, "pan.___", state 1535, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1548, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1557, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1573, "(1)"
+       line 249, "pan.___", state 1581, "(1)"
+       line 253, "pan.___", state 1593, "(1)"
+       line 257, "pan.___", state 1601, "(1)"
+       line 268, "pan.___", state 1632, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1641, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1654, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1663, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1679, "(1)"
+       line 249, "pan.___", state 1687, "(1)"
+       line 253, "pan.___", state 1699, "(1)"
+       line 257, "pan.___", state 1707, "(1)"
+       line 1123, "pan.___", state 1723, "-end-"
+       (110 of 1723 states)
+unreached in proctype :init:
+       line 1138, "pan.___", state 11, "((i<1))"
+       line 1138, "pan.___", state 11, "((i>=1))"
+       (1 of 26 states)
+unreached in proctype :never:
+       line 1184, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 1.31e+03 seconds
+pan: rate 2245.8255 states/second
+pan: avg transition delay 1.6631e-06 usec
+cp .input.spin urcu_free_no_wmb.spin.input
+cp .input.spin.trail urcu_free_no_wmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.spin.input
new file mode 100644 (file)
index 0000000..b79ec79
--- /dev/null
@@ -0,0 +1,1157 @@
+#define NO_WMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
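A worked reading of these two constants (an illustrative note, not part of the
patch): RCU_GP_CTR_BIT is 1 << 7 = 128, so RCU_GP_CTR_NEST_MASK is 127 (0x7f).
The low seven bits of urcu_active_readers hold the read-side nesting count
while bit 7 holds the grace-period parity the writer flips, which is why the
writer's wait condition

	(tmp2 & RCU_GP_CTR_NEST_MASK) && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT)

roughly reads as "a reader is inside a read-side critical section and its
snapshot still carries the other parity".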
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
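As a minimal illustration of how these three macros drive the dataflow
scheduling (TOK_A, TOK_B and TOK_C are hypothetical token names, not part of
the model):

	/* Executable only once TOK_A and TOK_B have been produced,
	 * and disabled again once TOK_C has been produced. */
	CONSUME_TOKENS(state, TOK_A | TOK_B, TOK_C)
	    /* expands to */
	((!(state & (TOK_C))) && (state & (TOK_A | TOK_B)) == (TOK_A | TOK_B))

The instruction guarded this way then calls PRODUCE_TOKENS(state, TOK_C) to
mark its own completion, and CLEAR_TOKENS resets a group of bits so the next
loop iteration starts from a clean token state.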
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can remove this dependency, but the ordering must be kept when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Another classic algorithm to compute dominance: Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
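+/*
+ * Illustrative sketch (not part of the model): using a hypothetical cached
+ * variable "a" and a local "tmp", the three data dependency kinds look like:
+ *
+ *   WRITE_CACHED_VAR(a, 1);
+ *   tmp = READ_CACHED_VAR(a);     RAW : the read needs the value just written
+ *
+ *   tmp = READ_CACHED_VAR(a);
+ *   WRITE_CACHED_VAR(a, 0);       WAR : the write must not overtake the read
+ *
+ *   WRITE_CACHED_VAR(a, 0);
+ *   WRITE_CACHED_VAR(a, 1);       WAW : the two writes must keep their order
+ */
+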
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
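+/*
+ * Illustrative sketch (not part of the model): the scenario HAVE_OOO_CACHE_READ
+ * is meant to expose is the reader sequence
+ *
+ *   ptr_read_first[...]  = READ_CACHED_VAR(rcu_ptr);
+ *   data_read_first[...] = READ_CACHED_VAR(rcu_data[ptr_read_first[...]]);
+ *
+ * where, on Alpha, the rcu_data[] cache bank may still hold a stale (POISON)
+ * value even though the freshly published rcu_ptr value has already been
+ * observed, unless a dependent-read barrier (smp_read_barrier_depends,
+ * modeled by the rmb1 label in urcu_one_read) is issued between the two reads.
+ */
+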
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May make the cached value visible to other processes (if the cache is
+ * dirty), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
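+/*
+ * Illustrative sketch (not part of the model): for a hypothetical cached
+ * variable "x", the usual lifecycle under this cache model is:
+ *
+ *   DECLARE_CACHED_VAR(byte, x);         global memory copy (mem_x)
+ *   DECLARE_PROC_CACHED_VAR(byte, x);    per-process cache copy + dirty bit
+ *   INIT_CACHED_VAR(x, 0);
+ *   INIT_PROC_CACHED_VAR(x, 0);
+ *
+ *   WRITE_CACHED_VAR(x, 1);              updates the local cache, marks it dirty
+ *   CACHE_WRITE_TO_MEM(x, get_pid());    write-back makes the store visible
+ *   CACHE_READ_FROM_MEM(x, get_pid());   refreshes a clean local cache line
+ *
+ * smp_wmb()/smp_rmb() below apply the write-back/refresh to every shared
+ * variable, while ooo_mem() applies the RANDOM_* variants so stores (and, on
+ * Alpha, loads) become visible at arbitrary points.
+ */
+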
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
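+/*
+ * Illustrative sketch (not part of the model): in the instruction graphs
+ * below, a barrier is just another token-producing instruction. For instance,
+ * the reader's first barrier consumes READ_LOCK_OUT and produces
+ * READ_PROC_FIRST_MB, and the subsequent rcu_ptr read consumes
+ * READ_PROC_FIRST_MB before it may execute:
+ *
+ *   :: CONSUME_TOKENS(proc_urcu_reader, READ_LOCK_OUT, READ_PROC_FIRST_MB) ->
+ *           smp_mb_reader(i, j);
+ *           PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+ *
+ * Removing a barrier (e.g. NO_MB) is modeled by producing its token
+ * unconditionally at the top of the process, which removes the ordering
+ * constraint without touching the rest of the graph.
+ */
+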
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader and sending barrier requests, while
+                * the reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
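+/*
+ * Illustrative sketch (not part of the model): in REMOTE_BARRIERS mode,
+ * smp_mb_send() (writer side) and smp_mb_recv() (reader side) implement a
+ * request/acknowledge handshake per reader:
+ *
+ *   writer                                 reader
+ *   ------                                 ------
+ *   smp_mb(i);
+ *   reader_barrier[r] = 1;          --->   (reader_barrier[get_readerid()] == 1)
+ *   busy-wait reader_barrier[r]==0         smp_mb(i);
+ *                                   <---   reader_barrier[get_readerid()] = 0;
+ *   smp_mb(i);
+ *
+ * The reader may also nondeterministically ignore the request and break out
+ * of smp_mb_recv(), which is why the writer's busy-wait carries the
+ * progress_writer_progid_* labels: waiting there is not reported as a
+ * non-progress cycle by the progress verifications.
+ */
+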
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
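+/*
+ * Illustrative sketch (not part of the model): READ_LOCK_BASE = 1 means the
+ * PROCEDURE_READ_LOCK() instance below uses bits 1 to 4 for its internal
+ * tokens (READ_PROD_A_READ << 1 ... READ_PROD_C_IF_TRUE_READ << 1) and
+ * produces READ_LOCK_OUT (bit 5) as its completion token. Likewise,
+ * READ_UNLOCK_BASE = 17 places READ_PROC_READ_UNLOCK at bit 17, with
+ * READ_UNLOCK_OUT (bit 18) as the completion token. Branch-internal tokens
+ * are deliberately left out of READ_PROC_ALL_TOKENS, which is why the final
+ * CLEAR_TOKENS uses the wider READ_PROC_ALL_TOKENS_CLEAR mask.
+ */
+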
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier is only serviced at points
+                * where the instructions executed so far appear in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
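+/*
+ * Illustrative sketch (not part of the model): ignoring reordering, the
+ * writer tokens above encode the following synchronize_rcu()-like sequence:
+ *
+ *   WRITE_DATA -> WRITE_PROC_WMB -> WRITE_XCHG_PTR -> WRITE_PROC_FIRST_MB
+ *   -> first parity flip  (FIRST_READ_GP, FIRST_WRITE_GP)   -> FIRST_WAIT
+ *   -> second parity flip (SECOND_READ_GP, SECOND_WRITE_GP) -> SECOND_WAIT
+ *   -> WRITE_PROC_SECOND_MB -> WRITE_FREE (poison the old slab entry)
+ *
+ * The per-instruction tokens let the verifier explore every interleaving
+ * compatible with the declared dependencies, not just this program order.
+ */
+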
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_no_wmb.spin.input.trail
new file mode 100644 (file)
index 0000000..4b13aef
--- /dev/null
@@ -0,0 +1,1361 @@
+-2:3:-2
+-4:-4:-4
+1:0:2887
+2:2:1136
+3:2:1141
+4:2:1145
+5:2:1153
+6:2:1157
+7:2:1161
+8:0:2887
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:2887
+16:3:2859
+17:3:2862
+18:3:2867
+19:3:2874
+20:3:2877
+21:3:2881
+22:3:2882
+23:0:2887
+24:3:2884
+25:0:2887
+26:2:1165
+27:0:2887
+28:2:1171
+29:0:2887
+30:2:1172
+31:0:2887
+32:2:1174
+33:0:2887
+34:2:1175
+35:0:2887
+36:2:1176
+37:0:2887
+38:2:1177
+39:2:1178
+40:2:1182
+41:2:1183
+42:2:1191
+43:2:1192
+44:2:1196
+45:2:1197
+46:2:1205
+47:2:1210
+48:2:1214
+49:2:1215
+50:2:1223
+51:2:1224
+52:2:1228
+53:2:1229
+54:2:1223
+55:2:1224
+56:2:1228
+57:2:1229
+58:2:1237
+59:2:1242
+60:2:1243
+61:2:1254
+62:2:1255
+63:2:1256
+64:2:1267
+65:2:1272
+66:2:1273
+67:2:1284
+68:2:1285
+69:2:1286
+70:2:1284
+71:2:1285
+72:2:1286
+73:2:1297
+74:2:1305
+75:0:2887
+76:2:1176
+77:0:2887
+78:2:1357
+79:2:1358
+80:2:1359
+81:0:2887
+82:2:1176
+83:0:2887
+84:2:1364
+85:0:2887
+86:2:2068
+87:2:2069
+88:2:2073
+89:2:2077
+90:2:2078
+91:2:2082
+92:2:2087
+93:2:2095
+94:2:2099
+95:2:2100
+96:2:2095
+97:2:2096
+98:2:2104
+99:2:2111
+100:2:2118
+101:2:2119
+102:2:2126
+103:2:2131
+104:2:2138
+105:2:2139
+106:2:2138
+107:2:2139
+108:2:2146
+109:2:2150
+110:0:2887
+111:2:2155
+112:0:2887
+113:2:2156
+114:0:2887
+115:2:2157
+116:0:2887
+117:2:2158
+118:0:2887
+119:1:29
+120:0:2887
+121:2:2159
+122:0:2887
+123:1:35
+124:0:2887
+125:1:36
+126:0:2887
+127:2:2158
+128:0:2887
+129:1:37
+130:0:2887
+131:2:2159
+132:0:2887
+133:1:38
+134:0:2887
+135:2:2158
+136:0:2887
+137:1:39
+138:0:2887
+139:2:2159
+140:0:2887
+141:1:40
+142:0:2887
+143:1:41
+144:0:2887
+145:2:2158
+146:0:2887
+147:1:42
+148:0:2887
+149:2:2159
+150:0:2887
+151:1:51
+152:0:2887
+153:2:2158
+154:0:2887
+155:1:55
+156:1:56
+157:1:60
+158:1:64
+159:1:65
+160:1:69
+161:1:77
+162:1:78
+163:1:82
+164:1:86
+165:1:87
+166:1:82
+167:1:86
+168:1:87
+169:1:91
+170:1:98
+171:1:105
+172:1:106
+173:1:113
+174:1:118
+175:1:125
+176:1:126
+177:1:125
+178:1:126
+179:1:133
+180:1:137
+181:0:2887
+182:2:2159
+183:0:2887
+184:1:142
+185:0:2887
+186:2:2160
+187:0:2887
+188:2:2165
+189:0:2887
+190:2:2166
+191:0:2887
+192:2:2174
+193:2:2175
+194:2:2179
+195:2:2183
+196:2:2184
+197:2:2188
+198:2:2196
+199:2:2197
+200:2:2201
+201:2:2205
+202:2:2206
+203:2:2201
+204:2:2205
+205:2:2206
+206:2:2210
+207:2:2217
+208:2:2224
+209:2:2225
+210:2:2232
+211:2:2237
+212:2:2244
+213:2:2245
+214:2:2244
+215:2:2245
+216:2:2252
+217:2:2256
+218:0:2887
+219:2:1366
+220:2:2049
+221:0:2887
+222:2:1176
+223:0:2887
+224:2:1367
+225:0:2887
+226:2:1176
+227:0:2887
+228:2:1370
+229:2:1371
+230:2:1375
+231:2:1376
+232:2:1384
+233:2:1385
+234:2:1389
+235:2:1390
+236:2:1398
+237:2:1403
+238:2:1407
+239:2:1408
+240:2:1416
+241:2:1417
+242:2:1421
+243:2:1422
+244:2:1416
+245:2:1417
+246:2:1421
+247:2:1422
+248:2:1430
+249:2:1435
+250:2:1436
+251:2:1447
+252:2:1448
+253:2:1449
+254:2:1460
+255:2:1465
+256:2:1466
+257:2:1477
+258:2:1478
+259:2:1479
+260:2:1477
+261:2:1478
+262:2:1479
+263:2:1490
+264:2:1497
+265:0:2887
+266:2:1176
+267:0:2887
+268:2:1501
+269:2:1502
+270:2:1503
+271:2:1515
+272:2:1516
+273:2:1520
+274:2:1521
+275:2:1529
+276:2:1534
+277:2:1538
+278:2:1539
+279:2:1547
+280:2:1548
+281:2:1552
+282:2:1553
+283:2:1547
+284:2:1548
+285:2:1552
+286:2:1553
+287:2:1561
+288:2:1566
+289:2:1567
+290:2:1578
+291:2:1579
+292:2:1580
+293:2:1591
+294:2:1596
+295:2:1597
+296:2:1608
+297:2:1609
+298:2:1610
+299:2:1608
+300:2:1609
+301:2:1610
+302:2:1621
+303:2:1632
+304:2:1633
+305:0:2887
+306:2:1176
+307:0:2887
+308:2:1640
+309:2:1641
+310:2:1645
+311:2:1646
+312:2:1654
+313:2:1655
+314:2:1659
+315:2:1660
+316:2:1668
+317:2:1673
+318:2:1677
+319:2:1678
+320:2:1686
+321:2:1687
+322:2:1691
+323:2:1692
+324:2:1686
+325:2:1687
+326:2:1691
+327:2:1692
+328:2:1700
+329:2:1705
+330:2:1706
+331:2:1717
+332:2:1718
+333:2:1719
+334:2:1730
+335:2:1735
+336:2:1736
+337:2:1747
+338:2:1748
+339:2:1749
+340:2:1747
+341:2:1748
+342:2:1749
+343:2:1760
+344:0:2887
+345:2:1176
+346:0:2887
+347:2:1769
+348:2:1770
+349:2:1774
+350:2:1775
+351:2:1783
+352:2:1784
+353:2:1788
+354:2:1789
+355:2:1797
+356:2:1802
+357:2:1806
+358:2:1807
+359:2:1815
+360:2:1816
+361:2:1820
+362:2:1821
+363:2:1815
+364:2:1816
+365:2:1820
+366:2:1821
+367:2:1829
+368:2:1834
+369:2:1835
+370:2:1846
+371:2:1847
+372:2:1848
+373:2:1859
+374:2:1864
+375:2:1865
+376:2:1876
+377:2:1877
+378:2:1878
+379:2:1876
+380:2:1877
+381:2:1878
+382:2:1889
+383:2:1896
+384:0:2887
+385:2:1176
+386:0:2887
+387:2:1900
+388:2:1901
+389:2:1902
+390:2:1914
+391:2:1915
+392:2:1919
+393:2:1920
+394:2:1928
+395:2:1933
+396:2:1937
+397:2:1938
+398:2:1946
+399:2:1947
+400:2:1951
+401:2:1952
+402:2:1946
+403:2:1947
+404:2:1951
+405:2:1952
+406:2:1960
+407:2:1965
+408:2:1966
+409:2:1977
+410:2:1978
+411:2:1979
+412:2:1990
+413:2:1995
+414:2:1996
+415:2:2007
+416:2:2008
+417:2:2009
+418:2:2007
+419:2:2008
+420:2:2009
+421:2:2020
+422:2:2030
+423:2:2031
+424:0:2887
+425:2:1176
+426:0:2887
+427:2:2037
+428:0:2887
+429:2:2662
+430:2:2663
+431:2:2667
+432:2:2671
+433:2:2672
+434:2:2676
+435:2:2684
+436:2:2685
+437:2:2689
+438:2:2693
+439:2:2694
+440:2:2689
+441:2:2693
+442:2:2694
+443:2:2698
+444:2:2705
+445:2:2712
+446:2:2713
+447:2:2720
+448:2:2725
+449:2:2732
+450:2:2733
+451:2:2732
+452:2:2733
+453:2:2740
+454:2:2744
+455:0:2887
+456:2:2749
+457:0:2887
+458:2:2750
+459:0:2887
+460:2:2751
+461:0:2887
+462:2:2752
+463:0:2887
+464:1:51
+465:0:2887
+466:2:2753
+467:0:2887
+468:1:55
+469:1:56
+470:1:60
+471:1:64
+472:1:65
+473:1:69
+474:1:77
+475:1:78
+476:1:82
+477:1:86
+478:1:87
+479:1:82
+480:1:86
+481:1:87
+482:1:91
+483:1:98
+484:1:105
+485:1:106
+486:1:113
+487:1:118
+488:1:125
+489:1:126
+490:1:125
+491:1:126
+492:1:133
+493:1:137
+494:0:2887
+495:2:2752
+496:0:2887
+497:1:142
+498:0:2887
+499:2:2753
+500:0:2887
+501:2:2754
+502:0:2887
+503:2:2759
+504:0:2887
+505:2:2760
+506:0:2887
+507:2:2768
+508:2:2769
+509:2:2773
+510:2:2777
+511:2:2778
+512:2:2782
+513:2:2790
+514:2:2791
+515:2:2795
+516:2:2799
+517:2:2800
+518:2:2795
+519:2:2799
+520:2:2800
+521:2:2804
+522:2:2811
+523:2:2818
+524:2:2819
+525:2:2826
+526:2:2831
+527:2:2838
+528:2:2839
+529:2:2838
+530:2:2839
+531:2:2846
+532:2:2850
+533:0:2887
+534:2:2039
+535:2:2049
+536:0:2887
+537:2:1176
+538:0:2887
+539:2:2040
+540:2:2041
+541:0:2887
+542:2:1176
+543:0:2887
+544:2:2045
+545:0:2887
+546:2:2053
+547:0:2887
+548:2:1172
+549:0:2887
+550:2:1174
+551:0:2887
+552:2:1175
+553:0:2887
+554:2:1176
+555:0:2887
+556:2:1357
+557:2:1358
+558:2:1359
+559:0:2887
+560:2:1176
+561:0:2887
+562:2:1177
+563:2:1178
+564:2:1182
+565:2:1183
+566:2:1191
+567:2:1192
+568:2:1196
+569:2:1197
+570:2:1205
+571:2:1210
+572:2:1211
+573:2:1223
+574:2:1224
+575:2:1225
+576:2:1223
+577:2:1224
+578:2:1228
+579:2:1229
+580:2:1237
+581:2:1242
+582:2:1243
+583:2:1254
+584:2:1255
+585:2:1256
+586:2:1267
+587:2:1272
+588:2:1273
+589:2:1284
+590:2:1285
+591:2:1286
+592:2:1284
+593:2:1285
+594:2:1286
+595:2:1297
+596:2:1305
+597:0:2887
+598:2:1176
+599:0:2887
+600:2:1364
+601:0:2887
+602:2:2068
+603:2:2069
+604:2:2073
+605:2:2077
+606:2:2078
+607:2:2082
+608:2:2090
+609:2:2091
+610:2:2095
+611:2:2096
+612:2:2095
+613:2:2099
+614:2:2100
+615:2:2104
+616:2:2111
+617:2:2118
+618:2:2119
+619:2:2126
+620:2:2131
+621:2:2138
+622:2:2139
+623:2:2138
+624:2:2139
+625:2:2146
+626:2:2150
+627:0:2887
+628:2:2155
+629:0:2887
+630:2:2156
+631:0:2887
+632:2:2157
+633:0:2887
+634:2:2158
+635:0:2887
+636:1:51
+637:0:2887
+638:2:2159
+639:0:2887
+640:1:55
+641:1:56
+642:1:60
+643:1:64
+644:1:65
+645:1:69
+646:1:77
+647:1:78
+648:1:82
+649:1:86
+650:1:87
+651:1:82
+652:1:86
+653:1:87
+654:1:91
+655:1:98
+656:1:105
+657:1:106
+658:1:113
+659:1:118
+660:1:125
+661:1:126
+662:1:125
+663:1:126
+664:1:133
+665:1:137
+666:0:2887
+667:2:2158
+668:0:2887
+669:1:142
+670:0:2887
+671:2:2159
+672:0:2887
+673:2:2160
+674:0:2887
+675:2:2165
+676:0:2887
+677:2:2166
+678:0:2887
+679:2:2174
+680:2:2175
+681:2:2179
+682:2:2183
+683:2:2184
+684:2:2188
+685:2:2196
+686:2:2197
+687:2:2201
+688:2:2205
+689:2:2206
+690:2:2201
+691:2:2205
+692:2:2206
+693:2:2210
+694:2:2217
+695:2:2224
+696:2:2225
+697:2:2232
+698:2:2237
+699:2:2244
+700:2:2245
+701:2:2244
+702:2:2245
+703:2:2252
+704:2:2256
+705:0:2887
+706:2:1366
+707:2:2049
+708:0:2887
+709:2:1176
+710:0:2887
+711:2:1367
+712:0:2887
+713:2:1176
+714:0:2887
+715:2:1370
+716:2:1371
+717:2:1375
+718:2:1376
+719:2:1384
+720:2:1385
+721:2:1389
+722:2:1390
+723:2:1398
+724:2:1403
+725:2:1407
+726:2:1408
+727:2:1416
+728:2:1417
+729:2:1421
+730:2:1422
+731:2:1416
+732:2:1417
+733:2:1421
+734:2:1422
+735:2:1430
+736:2:1435
+737:2:1436
+738:2:1447
+739:2:1448
+740:2:1449
+741:2:1460
+742:2:1465
+743:2:1466
+744:2:1477
+745:2:1478
+746:2:1479
+747:2:1477
+748:2:1478
+749:2:1479
+750:2:1490
+751:2:1497
+752:0:2887
+753:2:1176
+754:0:2887
+755:2:1501
+756:2:1502
+757:2:1503
+758:2:1515
+759:2:1516
+760:2:1520
+761:2:1521
+762:2:1529
+763:2:1534
+764:2:1538
+765:2:1539
+766:2:1547
+767:2:1548
+768:2:1552
+769:2:1553
+770:2:1547
+771:2:1548
+772:2:1552
+773:2:1553
+774:2:1561
+775:2:1566
+776:2:1567
+777:2:1578
+778:2:1579
+779:2:1580
+780:2:1591
+781:2:1596
+782:2:1597
+783:2:1608
+784:2:1609
+785:2:1610
+786:2:1608
+787:2:1609
+788:2:1610
+789:2:1621
+790:2:1632
+791:2:1633
+792:0:2887
+793:2:1176
+794:0:2887
+795:2:1640
+796:2:1641
+797:2:1645
+798:2:1646
+799:2:1654
+800:2:1655
+801:2:1659
+802:2:1660
+803:2:1668
+804:2:1673
+805:2:1677
+806:2:1678
+807:2:1686
+808:2:1687
+809:2:1691
+810:2:1692
+811:2:1686
+812:2:1687
+813:2:1691
+814:2:1692
+815:2:1700
+816:2:1705
+817:2:1706
+818:2:1717
+819:2:1718
+820:2:1719
+821:2:1730
+822:2:1735
+823:2:1736
+824:2:1747
+825:2:1748
+826:2:1749
+827:2:1747
+828:2:1748
+829:2:1749
+830:2:1760
+831:0:2887
+832:2:1176
+833:0:2887
+834:2:1769
+835:2:1770
+836:2:1774
+837:2:1775
+838:2:1783
+839:2:1784
+840:2:1788
+841:2:1789
+842:2:1797
+843:2:1802
+844:2:1806
+845:2:1807
+846:2:1815
+847:2:1816
+848:2:1820
+849:2:1821
+850:2:1815
+851:2:1816
+852:2:1820
+853:2:1821
+854:2:1829
+855:2:1834
+856:2:1835
+857:2:1846
+858:2:1847
+859:2:1848
+860:2:1859
+861:2:1864
+862:2:1865
+863:2:1876
+864:2:1877
+865:2:1878
+866:2:1876
+867:2:1877
+868:2:1878
+869:2:1889
+870:2:1896
+871:0:2887
+872:2:1176
+873:0:2887
+874:2:1900
+875:2:1901
+876:2:1902
+877:2:1914
+878:2:1915
+879:2:1919
+880:2:1920
+881:2:1928
+882:2:1933
+883:2:1937
+884:2:1938
+885:2:1946
+886:2:1947
+887:2:1951
+888:2:1952
+889:2:1946
+890:2:1947
+891:2:1951
+892:2:1952
+893:2:1960
+894:2:1965
+895:2:1966
+896:2:1977
+897:2:1978
+898:2:1979
+899:2:1990
+900:2:1995
+901:2:1996
+902:2:2007
+903:2:2008
+904:2:2009
+905:2:2007
+906:2:2008
+907:2:2009
+908:2:2020
+909:2:2030
+910:2:2031
+911:0:2887
+912:2:1176
+913:0:2887
+914:2:2037
+915:0:2887
+916:2:2662
+917:2:2663
+918:2:2667
+919:2:2671
+920:2:2672
+921:2:2676
+922:2:2684
+923:2:2685
+924:2:2689
+925:2:2693
+926:2:2694
+927:2:2689
+928:2:2693
+929:2:2694
+930:2:2698
+931:2:2705
+932:2:2712
+933:2:2713
+934:2:2720
+935:2:2725
+936:2:2732
+937:2:2733
+938:2:2732
+939:2:2733
+940:2:2740
+941:2:2744
+942:0:2887
+943:2:2749
+944:0:2887
+945:2:2750
+946:0:2887
+947:2:2751
+948:0:2887
+949:2:2752
+950:0:2887
+951:1:51
+952:0:2887
+953:2:2753
+954:0:2887
+955:1:55
+956:1:56
+957:1:60
+958:1:64
+959:1:65
+960:1:69
+961:1:77
+962:1:78
+963:1:82
+964:1:86
+965:1:87
+966:1:82
+967:1:86
+968:1:87
+969:1:91
+970:1:98
+971:1:105
+972:1:106
+973:1:113
+974:1:118
+975:1:125
+976:1:126
+977:1:125
+978:1:126
+979:1:133
+980:1:137
+981:0:2887
+982:2:2752
+983:0:2887
+984:1:142
+985:0:2887
+986:2:2753
+987:0:2887
+988:2:2754
+989:0:2887
+990:2:2759
+991:0:2887
+992:2:2760
+993:0:2887
+994:2:2768
+995:2:2769
+996:2:2773
+997:2:2777
+998:2:2778
+999:2:2782
+1000:2:2790
+1001:2:2791
+1002:2:2795
+1003:2:2799
+1004:2:2800
+1005:2:2795
+1006:2:2799
+1007:2:2800
+1008:2:2804
+1009:2:2811
+1010:2:2818
+1011:2:2819
+1012:2:2826
+1013:2:2831
+1014:2:2838
+1015:2:2839
+1016:2:2838
+1017:2:2839
+1018:2:2846
+1019:2:2850
+1020:0:2887
+1021:2:2039
+1022:2:2049
+1023:0:2887
+1024:2:1176
+1025:0:2887
+1026:2:2040
+1027:2:2041
+1028:0:2887
+1029:2:1176
+1030:0:2887
+1031:2:2045
+1032:0:2887
+1033:2:2053
+1034:0:2887
+1035:2:1172
+1036:0:2887
+1037:2:1174
+1038:0:2887
+1039:2:1175
+1040:0:2887
+1041:2:1176
+1042:0:2887
+1043:2:1177
+1044:2:1178
+1045:2:1182
+1046:2:1183
+1047:2:1191
+1048:2:1192
+1049:2:1196
+1050:2:1197
+1051:2:1205
+1052:2:1210
+1053:2:1214
+1054:2:1215
+1055:2:1223
+1056:2:1224
+1057:2:1228
+1058:2:1229
+1059:2:1223
+1060:2:1224
+1061:2:1225
+1062:2:1237
+1063:2:1242
+1064:2:1243
+1065:2:1254
+1066:2:1255
+1067:2:1256
+1068:2:1267
+1069:2:1272
+1070:2:1273
+1071:2:1284
+1072:2:1285
+1073:2:1286
+1074:2:1284
+1075:2:1285
+1076:2:1286
+1077:2:1297
+1078:2:1305
+1079:0:2887
+1080:2:1176
+1081:0:2887
+1082:2:1357
+1083:2:1358
+1084:2:1359
+1085:0:2887
+1086:2:1176
+1087:0:2887
+1088:2:1364
+1089:0:2887
+1090:1:143
+1091:0:2887
+1092:1:145
+1093:0:2887
+1094:1:44
+1095:0:2887
+1096:1:151
+1097:1:152
+1098:1:156
+1099:1:157
+1100:1:165
+1101:1:166
+1102:1:170
+1103:1:171
+1104:1:179
+1105:1:184
+1106:1:188
+1107:1:189
+1108:1:197
+1109:1:198
+1110:1:202
+1111:1:203
+1112:1:197
+1113:1:198
+1114:1:202
+1115:1:203
+1116:1:211
+1117:1:216
+1118:1:217
+1119:1:228
+1120:1:229
+1121:1:230
+1122:1:241
+1123:1:246
+1124:1:247
+1125:1:258
+1126:1:259
+1127:1:260
+1128:1:258
+1129:1:259
+1130:1:260
+1131:1:271
+1132:0:2887
+1133:1:40
+1134:0:2887
+1135:1:41
+1136:0:2887
+1137:1:42
+1138:0:2887
+1139:1:143
+1140:0:2887
+1141:1:145
+1142:0:2887
+1143:1:44
+1144:0:2887
+1145:1:280
+1146:1:281
+1147:0:2887
+1148:1:40
+1149:0:2887
+1150:1:41
+1151:0:2887
+1152:1:42
+1153:0:2887
+1154:1:143
+1155:0:2887
+1156:1:145
+1157:0:2887
+1158:1:44
+1159:0:2887
+1160:1:287
+1161:1:288
+1162:1:292
+1163:1:293
+1164:1:301
+1165:1:302
+1166:1:306
+1167:1:307
+1168:1:315
+1169:1:320
+1170:1:324
+1171:1:325
+1172:1:333
+1173:1:334
+1174:1:338
+1175:1:339
+1176:1:333
+1177:1:334
+1178:1:338
+1179:1:339
+1180:1:347
+1181:1:352
+1182:1:353
+1183:1:364
+1184:1:365
+1185:1:366
+1186:1:377
+1187:1:382
+1188:1:383
+1189:1:394
+1190:1:395
+1191:1:396
+1192:1:394
+1193:1:395
+1194:1:396
+1195:1:407
+1196:0:2887
+1197:1:40
+1198:0:2887
+1199:1:41
+1200:0:2887
+1201:1:42
+1202:0:2887
+1203:1:143
+1204:0:2887
+1205:1:145
+1206:0:2887
+1207:1:44
+1208:0:2887
+1209:1:416
+1210:1:417
+1211:1:421
+1212:1:422
+1213:1:430
+1214:1:431
+1215:1:435
+1216:1:436
+1217:1:444
+1218:1:449
+1219:1:453
+1220:1:454
+1221:1:462
+1222:1:463
+1223:1:467
+1224:1:468
+1225:1:462
+1226:1:463
+1227:1:467
+1228:1:468
+1229:1:476
+1230:1:481
+1231:1:482
+1232:1:493
+1233:1:494
+1234:1:495
+1235:1:506
+1236:1:511
+1237:1:512
+1238:1:523
+1239:1:524
+1240:1:525
+1241:1:523
+1242:1:524
+1243:1:525
+1244:1:536
+1245:1:543
+1246:0:2887
+1247:1:40
+1248:0:2887
+1249:1:41
+1250:0:2887
+1251:1:42
+1252:0:2887
+1253:1:143
+1254:0:2887
+1255:1:145
+1256:0:2887
+1257:1:44
+1258:0:2887
+1259:1:681
+1260:1:682
+1261:1:686
+1262:1:687
+1263:1:695
+1264:1:696
+1265:1:697
+1266:1:709
+1267:1:714
+1268:1:718
+1269:1:719
+1270:1:727
+1271:1:728
+1272:1:732
+1273:1:733
+1274:1:727
+1275:1:728
+1276:1:732
+1277:1:733
+1278:1:741
+1279:1:746
+1280:1:747
+1281:1:758
+1282:1:759
+1283:1:760
+1284:1:771
+1285:1:776
+1286:1:777
+1287:1:788
+1288:1:789
+1289:1:790
+1290:1:788
+1291:1:789
+1292:1:790
+1293:1:801
+1294:0:2887
+1295:1:40
+1296:0:2887
+1297:1:41
+1298:0:2887
+1299:1:42
+1300:0:2887
+1301:1:143
+1302:0:2887
+1303:1:145
+1304:0:2887
+1305:1:44
+1306:0:2887
+1307:1:810
+1308:0:2887
+1309:1:1087
+1310:1:1094
+1311:1:1095
+1312:1:1102
+1313:1:1107
+1314:1:1114
+1315:1:1115
+1316:1:1114
+1317:1:1115
+1318:1:1122
+1319:1:1126
+1320:0:2887
+1321:2:2068
+1322:2:2069
+1323:2:2073
+1324:2:2077
+1325:2:2078
+1326:2:2082
+1327:2:2087
+1328:2:2095
+1329:2:2099
+1330:2:2100
+1331:2:2095
+1332:2:2096
+1333:2:2104
+1334:2:2111
+1335:2:2118
+1336:2:2119
+1337:2:2126
+1338:2:2131
+1339:2:2138
+1340:2:2139
+1341:2:2138
+1342:2:2139
+1343:2:2146
+1344:2:2150
+1345:0:2887
+1346:2:2155
+1347:0:2887
+1348:2:2156
+1349:0:2887
+1350:2:2157
+1351:0:2887
+1352:2:2158
+1353:0:2887
+1354:1:812
+1355:1:813
+1356:0:2885
+1357:2:2159
+1358:0:2891
+1359:1:919
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.log
new file mode 100644 (file)
index 0000000..b0c3662
--- /dev/null
@@ -0,0 +1,458 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_single_flip.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1179)
+Depth=    4014 States=    1e+06 Transitions= 1.62e+08 Memory=   512.736        t=    256 R=   4e+03
+pan: claim violated! (at depth 1235)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 4014, errors: 1
+  1411681 states, stored
+2.3165948e+08 states, matched
+2.3307116e+08 transitions (= stored+matched)
+1.3076596e+09 atomic steps
+hash conflicts:  78416855 (resolved)
+
+Stats on memory usage (in Megabytes):
+  156.169      equivalent memory usage for states (stored*(State-vector + overhead))
+   66.078      actual memory usage for states (compression: 42.31%)
+               state-vector as stored = 13 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  531.779      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 14645 1835 1567 2 2 ]
+unreached in proctype urcu_reader
+       line 713, "pan.___", state 12, "((i<1))"
+       line 713, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 102, "(1)"
+       line 249, "pan.___", state 110, "(1)"
+       line 253, "pan.___", state 122, "(1)"
+       line 257, "pan.___", state 130, "(1)"
+       line 404, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 221, "(1)"
+       line 431, "pan.___", state 251, "(1)"
+       line 435, "pan.___", state 264, "(1)"
+       line 614, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 404, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 357, "(1)"
+       line 431, "pan.___", state 387, "(1)"
+       line 435, "pan.___", state 400, "(1)"
+       line 404, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 486, "(1)"
+       line 431, "pan.___", state 516, "(1)"
+       line 435, "pan.___", state 529, "(1)"
+       line 404, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 554, "(1)"
+       line 404, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 555, "else"
+       line 404, "pan.___", state 558, "(1)"
+       line 408, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 568, "(1)"
+       line 408, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 569, "else"
+       line 408, "pan.___", state 572, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 406, "pan.___", state 578, "((i<1))"
+       line 406, "pan.___", state 578, "((i>=1))"
+       line 413, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 586, "(1)"
+       line 413, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 587, "else"
+       line 413, "pan.___", state 590, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 417, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 600, "(1)"
+       line 417, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 601, "else"
+       line 417, "pan.___", state 604, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 415, "pan.___", state 610, "((i<2))"
+       line 415, "pan.___", state 610, "((i>=2))"
+       line 422, "pan.___", state 617, "(1)"
+       line 422, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 618, "else"
+       line 422, "pan.___", state 621, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 426, "pan.___", state 630, "(1)"
+       line 426, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 631, "else"
+       line 426, "pan.___", state 634, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 424, "pan.___", state 640, "((i<1))"
+       line 424, "pan.___", state 640, "((i>=1))"
+       line 431, "pan.___", state 647, "(1)"
+       line 431, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 648, "else"
+       line 431, "pan.___", state 651, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 435, "pan.___", state 660, "(1)"
+       line 435, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 661, "else"
+       line 435, "pan.___", state 664, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 433, "pan.___", state 670, "((i<2))"
+       line 433, "pan.___", state 670, "((i>=2))"
+       line 443, "pan.___", state 674, "(1)"
+       line 443, "pan.___", state 674, "(1)"
+       line 614, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 614, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 614, "pan.___", state 679, "(1)"
+       line 404, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 751, "(1)"
+       line 431, "pan.___", state 781, "(1)"
+       line 435, "pan.___", state 794, "(1)"
+       line 404, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 886, "(1)"
+       line 431, "pan.___", state 916, "(1)"
+       line 435, "pan.___", state 929, "(1)"
+       line 404, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1015, "(1)"
+       line 431, "pan.___", state 1045, "(1)"
+       line 435, "pan.___", state 1058, "(1)"
+       line 245, "pan.___", state 1091, "(1)"
+       line 253, "pan.___", state 1111, "(1)"
+       line 257, "pan.___", state 1119, "(1)"
+       line 748, "pan.___", state 1136, "-end-"
+       (92 of 1136 states)
+unreached in proctype urcu_writer
+       line 837, "pan.___", state 12, "((i<1))"
+       line 837, "pan.___", state 12, "((i>=1))"
+       line 404, "pan.___", state 49, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 63, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 81, "cache_dirty_rcu_ptr = 0"
+       line 422, "pan.___", state 114, "(1)"
+       line 426, "pan.___", state 127, "(1)"
+       line 431, "pan.___", state 144, "(1)"
+       line 268, "pan.___", state 180, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 189, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 202, "cache_dirty_rcu_ptr = 0"
+       line 404, "pan.___", state 242, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 256, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 274, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 288, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 307, "(1)"
+       line 426, "pan.___", state 320, "(1)"
+       line 431, "pan.___", state 337, "(1)"
+       line 435, "pan.___", state 350, "(1)"
+       line 408, "pan.___", state 387, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 405, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 419, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 451, "(1)"
+       line 431, "pan.___", state 468, "(1)"
+       line 435, "pan.___", state 481, "(1)"
+       line 404, "pan.___", state 511, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 525, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 543, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 557, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 576, "(1)"
+       line 426, "pan.___", state 589, "(1)"
+       line 431, "pan.___", state 606, "(1)"
+       line 435, "pan.___", state 619, "(1)"
+       line 404, "pan.___", state 640, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 642, "(1)"
+       line 404, "pan.___", state 643, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 643, "else"
+       line 404, "pan.___", state 646, "(1)"
+       line 408, "pan.___", state 654, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 656, "(1)"
+       line 408, "pan.___", state 657, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 657, "else"
+       line 408, "pan.___", state 660, "(1)"
+       line 408, "pan.___", state 661, "(1)"
+       line 408, "pan.___", state 661, "(1)"
+       line 406, "pan.___", state 666, "((i<1))"
+       line 406, "pan.___", state 666, "((i>=1))"
+       line 413, "pan.___", state 672, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 674, "(1)"
+       line 413, "pan.___", state 675, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 675, "else"
+       line 413, "pan.___", state 678, "(1)"
+       line 413, "pan.___", state 679, "(1)"
+       line 413, "pan.___", state 679, "(1)"
+       line 417, "pan.___", state 686, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 688, "(1)"
+       line 417, "pan.___", state 689, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 689, "else"
+       line 417, "pan.___", state 692, "(1)"
+       line 417, "pan.___", state 693, "(1)"
+       line 417, "pan.___", state 693, "(1)"
+       line 415, "pan.___", state 698, "((i<2))"
+       line 415, "pan.___", state 698, "((i>=2))"
+       line 422, "pan.___", state 705, "(1)"
+       line 422, "pan.___", state 706, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 706, "else"
+       line 422, "pan.___", state 709, "(1)"
+       line 422, "pan.___", state 710, "(1)"
+       line 422, "pan.___", state 710, "(1)"
+       line 426, "pan.___", state 718, "(1)"
+       line 426, "pan.___", state 719, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 719, "else"
+       line 426, "pan.___", state 722, "(1)"
+       line 426, "pan.___", state 723, "(1)"
+       line 426, "pan.___", state 723, "(1)"
+       line 424, "pan.___", state 728, "((i<1))"
+       line 424, "pan.___", state 728, "((i>=1))"
+       line 431, "pan.___", state 735, "(1)"
+       line 431, "pan.___", state 736, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 736, "else"
+       line 431, "pan.___", state 739, "(1)"
+       line 431, "pan.___", state 740, "(1)"
+       line 431, "pan.___", state 740, "(1)"
+       line 435, "pan.___", state 748, "(1)"
+       line 435, "pan.___", state 749, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 749, "else"
+       line 435, "pan.___", state 752, "(1)"
+       line 435, "pan.___", state 753, "(1)"
+       line 435, "pan.___", state 753, "(1)"
+       line 433, "pan.___", state 758, "((i<2))"
+       line 433, "pan.___", state 758, "((i>=2))"
+       line 443, "pan.___", state 762, "(1)"
+       line 443, "pan.___", state 762, "(1)"
+       line 1003, "pan.___", state 766, "_proc_urcu_writer = (_proc_urcu_writer|(1<<10))"
+       line 404, "pan.___", state 771, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 773, "(1)"
+       line 404, "pan.___", state 774, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 774, "else"
+       line 404, "pan.___", state 777, "(1)"
+       line 408, "pan.___", state 785, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 787, "(1)"
+       line 408, "pan.___", state 788, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 788, "else"
+       line 408, "pan.___", state 791, "(1)"
+       line 408, "pan.___", state 792, "(1)"
+       line 408, "pan.___", state 792, "(1)"
+       line 406, "pan.___", state 797, "((i<1))"
+       line 406, "pan.___", state 797, "((i>=1))"
+       line 413, "pan.___", state 803, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 805, "(1)"
+       line 413, "pan.___", state 806, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 806, "else"
+       line 413, "pan.___", state 809, "(1)"
+       line 413, "pan.___", state 810, "(1)"
+       line 413, "pan.___", state 810, "(1)"
+       line 417, "pan.___", state 817, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 819, "(1)"
+       line 417, "pan.___", state 820, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 820, "else"
+       line 417, "pan.___", state 823, "(1)"
+       line 417, "pan.___", state 824, "(1)"
+       line 417, "pan.___", state 824, "(1)"
+       line 415, "pan.___", state 829, "((i<2))"
+       line 415, "pan.___", state 829, "((i>=2))"
+       line 422, "pan.___", state 836, "(1)"
+       line 422, "pan.___", state 837, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 837, "else"
+       line 422, "pan.___", state 840, "(1)"
+       line 422, "pan.___", state 841, "(1)"
+       line 422, "pan.___", state 841, "(1)"
+       line 426, "pan.___", state 849, "(1)"
+       line 426, "pan.___", state 850, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 850, "else"
+       line 426, "pan.___", state 853, "(1)"
+       line 426, "pan.___", state 854, "(1)"
+       line 426, "pan.___", state 854, "(1)"
+       line 424, "pan.___", state 859, "((i<1))"
+       line 424, "pan.___", state 859, "((i>=1))"
+       line 431, "pan.___", state 866, "(1)"
+       line 431, "pan.___", state 867, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 867, "else"
+       line 431, "pan.___", state 870, "(1)"
+       line 431, "pan.___", state 871, "(1)"
+       line 431, "pan.___", state 871, "(1)"
+       line 435, "pan.___", state 879, "(1)"
+       line 435, "pan.___", state 880, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 880, "else"
+       line 435, "pan.___", state 883, "(1)"
+       line 435, "pan.___", state 884, "(1)"
+       line 435, "pan.___", state 884, "(1)"
+       line 433, "pan.___", state 889, "((i<2))"
+       line 433, "pan.___", state 889, "((i>=2))"
+       line 443, "pan.___", state 893, "(1)"
+       line 443, "pan.___", state 893, "(1)"
+       line 1019, "pan.___", state 898, "_proc_urcu_writer = (_proc_urcu_writer|(1<<11))"
+       line 1014, "pan.___", state 899, "(((tmp2&((1<<7)-1))&&((tmp2^0)&(1<<7))))"
+       line 1014, "pan.___", state 899, "else"
+       line 1039, "pan.___", state 903, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<12)|(1<<11))))"
+       line 268, "pan.___", state 934, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 943, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 958, "(1)"
+       line 280, "pan.___", state 965, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 981, "(1)"
+       line 249, "pan.___", state 989, "(1)"
+       line 253, "pan.___", state 1001, "(1)"
+       line 257, "pan.___", state 1009, "(1)"
+       line 268, "pan.___", state 1040, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1049, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1062, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1071, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1087, "(1)"
+       line 249, "pan.___", state 1095, "(1)"
+       line 253, "pan.___", state 1107, "(1)"
+       line 257, "pan.___", state 1115, "(1)"
+       line 272, "pan.___", state 1141, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1154, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1163, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1179, "(1)"
+       line 249, "pan.___", state 1187, "(1)"
+       line 253, "pan.___", state 1199, "(1)"
+       line 257, "pan.___", state 1207, "(1)"
+       line 268, "pan.___", state 1238, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1247, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1260, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1269, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1285, "(1)"
+       line 249, "pan.___", state 1293, "(1)"
+       line 253, "pan.___", state 1305, "(1)"
+       line 257, "pan.___", state 1313, "(1)"
+       line 268, "pan.___", state 1330, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1332, "(1)"
+       line 272, "pan.___", state 1339, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1341, "(1)"
+       line 272, "pan.___", state 1342, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1342, "else"
+       line 270, "pan.___", state 1347, "((i<1))"
+       line 270, "pan.___", state 1347, "((i>=1))"
+       line 276, "pan.___", state 1352, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1354, "(1)"
+       line 276, "pan.___", state 1355, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1355, "else"
+       line 280, "pan.___", state 1361, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1363, "(1)"
+       line 280, "pan.___", state 1364, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1364, "else"
+       line 278, "pan.___", state 1369, "((i<2))"
+       line 278, "pan.___", state 1369, "((i>=2))"
+       line 245, "pan.___", state 1377, "(1)"
+       line 249, "pan.___", state 1385, "(1)"
+       line 249, "pan.___", state 1386, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1386, "else"
+       line 247, "pan.___", state 1391, "((i<1))"
+       line 247, "pan.___", state 1391, "((i>=1))"
+       line 253, "pan.___", state 1397, "(1)"
+       line 253, "pan.___", state 1398, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1398, "else"
+       line 257, "pan.___", state 1405, "(1)"
+       line 257, "pan.___", state 1406, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1406, "else"
+       line 262, "pan.___", state 1415, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1415, "else"
+       line 1115, "pan.___", state 1418, "i = 0"
+       line 1115, "pan.___", state 1420, "reader_barrier = 1"
+       line 1115, "pan.___", state 1431, "((i<1))"
+       line 1115, "pan.___", state 1431, "((i>=1))"
+       line 268, "pan.___", state 1436, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1438, "(1)"
+       line 272, "pan.___", state 1445, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1447, "(1)"
+       line 272, "pan.___", state 1448, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1448, "else"
+       line 270, "pan.___", state 1453, "((i<1))"
+       line 270, "pan.___", state 1453, "((i>=1))"
+       line 276, "pan.___", state 1458, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1460, "(1)"
+       line 276, "pan.___", state 1461, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1461, "else"
+       line 280, "pan.___", state 1467, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1469, "(1)"
+       line 280, "pan.___", state 1470, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1470, "else"
+       line 278, "pan.___", state 1475, "((i<2))"
+       line 278, "pan.___", state 1475, "((i>=2))"
+       line 245, "pan.___", state 1483, "(1)"
+       line 249, "pan.___", state 1491, "(1)"
+       line 249, "pan.___", state 1492, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1492, "else"
+       line 247, "pan.___", state 1497, "((i<1))"
+       line 247, "pan.___", state 1497, "((i>=1))"
+       line 253, "pan.___", state 1503, "(1)"
+       line 253, "pan.___", state 1504, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1504, "else"
+       line 257, "pan.___", state 1511, "(1)"
+       line 257, "pan.___", state 1512, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1512, "else"
+       line 262, "pan.___", state 1521, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1521, "else"
+       line 295, "pan.___", state 1523, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1523, "else"
+       line 1115, "pan.___", state 1524, "(cache_dirty_urcu_gp_ctr)"
+       line 1115, "pan.___", state 1524, "else"
+       line 272, "pan.___", state 1537, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1550, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1559, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1575, "(1)"
+       line 249, "pan.___", state 1583, "(1)"
+       line 253, "pan.___", state 1595, "(1)"
+       line 257, "pan.___", state 1603, "(1)"
+       line 268, "pan.___", state 1634, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1643, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1656, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1665, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1681, "(1)"
+       line 249, "pan.___", state 1689, "(1)"
+       line 253, "pan.___", state 1701, "(1)"
+       line 257, "pan.___", state 1709, "(1)"
+       line 1123, "pan.___", state 1725, "-end-"
+       (212 of 1725 states)
+unreached in proctype :init:
+       line 1138, "pan.___", state 11, "((i<1))"
+       line 1138, "pan.___", state 11, "((i>=1))"
+       (1 of 26 states)
+unreached in proctype :never:
+       line 1184, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 372 seconds
+pan: rate 3796.6785 states/second
+pan: avg transition delay 1.5953e-06 usec
+cp .input.spin urcu_free_single_flip.spin.input
+cp .input.spin.trail urcu_free_single_flip.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.spin.input
new file mode 100644 (file)
index 0000000..3af0fbc
--- /dev/null
@@ -0,0 +1,1157 @@
+#define SINGLE_FLIP
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
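+/*
+ * Layout sketch (illustrative note, not used directly by the model): the bits
+ * covered by RCU_GP_CTR_NEST_MASK track read-side critical section nesting in
+ * urcu_active_readers, while RCU_GP_CTR_BIT marks the grace-period phase in
+ * urcu_gp_ctr.
+ */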
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
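+/*
+ * Illustrative sketch (not part of the verified model): how the macros above
+ * encode a simple RAW dependency. Statement B reads a value written by
+ * statement A, so B only becomes eligible once A has produced its token, and
+ * a final branch fires once both tokens are present. The identifiers flow,
+ * TOK_A and TOK_B are hypothetical and exist only in this comment; the model
+ * itself uses the READ_PROD_ / READ_PROC_ tokens defined further down.
+ *
+ *     #define TOK_A  (1 << 0)
+ *     #define TOK_B  (1 << 1)
+ *
+ *     do
+ *     :: CONSUME_TOKENS(flow, 0, TOK_A) ->
+ *             x = 1;
+ *             PRODUCE_TOKENS(flow, TOK_A);
+ *     :: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->
+ *             y = x;
+ *             PRODUCE_TOKENS(flow, TOK_B);
+ *     :: CONSUME_TOKENS(flow, TOK_A | TOK_B, 0) ->
+ *             CLEAR_TOKENS(flow, TOK_A | TOK_B);
+ *             break;
+ *     od;
+ */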
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
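+/*
+ * Illustrative sketch (not part of the verified model): typical life cycle of
+ * a cached variable under the macros above, for a hypothetical variable foo
+ * used only in this comment. A write only updates the local copy and marks it
+ * dirty; the new value becomes visible to other processes once
+ * CACHE_WRITE_TO_MEM (or a random cache update) flushes it to mem_foo, and a
+ * remote process observes it after CACHE_READ_FROM_MEM refreshes its own
+ * clean copy.
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);          global mem_foo
+ *     DECLARE_PROC_CACHED_VAR(byte, foo);     per-proc cached_foo + dirty bit
+ *     INIT_CACHED_VAR(foo, 0);
+ *     INIT_PROC_CACHED_VAR(foo, 0);
+ *
+ *     WRITE_CACHED_VAR(foo, 1);               cached_foo = 1, marked dirty
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     mem_foo = 1, dirty bit cleared
+ *     CACHE_READ_FROM_MEM(foo, get_pid());    refreshes cached_foo when clean
+ */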
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add core synchronization that does not exist and would
+ * therefore create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop iterations.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop's execution from spilling into the next loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks that the data entry read is poisoned, it's ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops infinitely, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_free_single_flip.spin.input.trail
new file mode 100644 (file)
index 0000000..53de47d
--- /dev/null
@@ -0,0 +1,1238 @@
+-2:3:-2
+-4:-4:-4
+1:0:2889
+2:2:1136
+3:2:1141
+4:2:1145
+5:2:1153
+6:2:1157
+7:2:1161
+8:0:2889
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:2889
+16:3:2861
+17:3:2864
+18:3:2869
+19:3:2876
+20:3:2879
+21:3:2883
+22:3:2884
+23:0:2889
+24:3:2886
+25:0:2889
+26:2:1165
+27:0:2889
+28:2:1171
+29:0:2889
+30:2:1172
+31:0:2889
+32:2:1174
+33:0:2889
+34:2:1175
+35:0:2889
+36:2:1176
+37:0:2889
+38:2:1177
+39:0:2889
+40:2:1178
+41:0:2889
+42:2:1179
+43:0:2889
+44:2:1180
+45:2:1181
+46:2:1185
+47:2:1186
+48:2:1194
+49:2:1195
+50:2:1199
+51:2:1200
+52:2:1208
+53:2:1213
+54:2:1217
+55:2:1218
+56:2:1226
+57:2:1227
+58:2:1231
+59:2:1232
+60:2:1226
+61:2:1227
+62:2:1231
+63:2:1232
+64:2:1240
+65:2:1245
+66:2:1246
+67:2:1257
+68:2:1258
+69:2:1259
+70:2:1270
+71:2:1275
+72:2:1276
+73:2:1287
+74:2:1288
+75:2:1289
+76:2:1287
+77:2:1288
+78:2:1289
+79:2:1300
+80:2:1308
+81:0:2889
+82:2:1179
+83:0:2889
+84:2:1312
+85:2:1316
+86:2:1317
+87:2:1321
+88:2:1325
+89:2:1326
+90:2:1330
+91:2:1338
+92:2:1339
+93:2:1343
+94:2:1347
+95:2:1348
+96:2:1343
+97:2:1344
+98:2:1352
+99:0:2889
+100:2:1179
+101:0:2889
+102:2:1360
+103:2:1361
+104:2:1362
+105:0:2889
+106:2:1179
+107:0:2889
+108:2:1367
+109:0:2889
+110:2:2070
+111:2:2071
+112:2:2075
+113:2:2079
+114:2:2080
+115:2:2084
+116:2:2089
+117:2:2097
+118:2:2101
+119:2:2102
+120:2:2097
+121:2:2101
+122:2:2102
+123:2:2106
+124:2:2113
+125:2:2120
+126:2:2121
+127:2:2128
+128:2:2133
+129:2:2140
+130:2:2141
+131:2:2140
+132:2:2141
+133:2:2148
+134:2:2152
+135:0:2889
+136:2:2157
+137:0:2889
+138:2:2158
+139:0:2889
+140:2:2159
+141:0:2889
+142:2:2160
+143:0:2889
+144:1:29
+145:0:2889
+146:2:2161
+147:0:2889
+148:1:35
+149:0:2889
+150:1:36
+151:0:2889
+152:2:2160
+153:0:2889
+154:1:37
+155:0:2889
+156:2:2161
+157:0:2889
+158:1:38
+159:0:2889
+160:2:2160
+161:0:2889
+162:1:39
+163:0:2889
+164:2:2161
+165:0:2889
+166:1:40
+167:0:2889
+168:1:41
+169:0:2889
+170:2:2160
+171:0:2889
+172:1:42
+173:0:2889
+174:2:2161
+175:0:2889
+176:1:51
+177:0:2889
+178:2:2160
+179:0:2889
+180:1:55
+181:1:56
+182:1:60
+183:1:64
+184:1:65
+185:1:69
+186:1:77
+187:1:78
+188:1:82
+189:1:86
+190:1:87
+191:1:82
+192:1:86
+193:1:87
+194:1:91
+195:1:98
+196:1:105
+197:1:106
+198:1:113
+199:1:118
+200:1:125
+201:1:126
+202:1:125
+203:1:126
+204:1:133
+205:1:137
+206:0:2889
+207:2:2161
+208:0:2889
+209:1:142
+210:0:2889
+211:2:2162
+212:0:2889
+213:2:2167
+214:0:2889
+215:2:2168
+216:0:2889
+217:2:2176
+218:2:2177
+219:2:2181
+220:2:2185
+221:2:2186
+222:2:2190
+223:2:2198
+224:2:2199
+225:2:2203
+226:2:2207
+227:2:2208
+228:2:2203
+229:2:2207
+230:2:2208
+231:2:2212
+232:2:2219
+233:2:2226
+234:2:2227
+235:2:2234
+236:2:2239
+237:2:2246
+238:2:2247
+239:2:2246
+240:2:2247
+241:2:2254
+242:2:2258
+243:0:2889
+244:2:1369
+245:2:2051
+246:0:2889
+247:2:1179
+248:0:2889
+249:2:1370
+250:0:2889
+251:2:1179
+252:0:2889
+253:2:1373
+254:2:1374
+255:2:1378
+256:2:1379
+257:2:1387
+258:2:1388
+259:2:1392
+260:2:1393
+261:2:1401
+262:2:1406
+263:2:1410
+264:2:1411
+265:2:1419
+266:2:1420
+267:2:1424
+268:2:1425
+269:2:1419
+270:2:1420
+271:2:1424
+272:2:1425
+273:2:1433
+274:2:1438
+275:2:1439
+276:2:1450
+277:2:1451
+278:2:1452
+279:2:1463
+280:2:1468
+281:2:1469
+282:2:1480
+283:2:1481
+284:2:1482
+285:2:1480
+286:2:1481
+287:2:1482
+288:2:1493
+289:2:1500
+290:0:2889
+291:2:1179
+292:0:2889
+293:2:1504
+294:2:1505
+295:2:1506
+296:2:1518
+297:2:1519
+298:2:1523
+299:2:1524
+300:2:1532
+301:2:1537
+302:2:1541
+303:2:1542
+304:2:1550
+305:2:1551
+306:2:1555
+307:2:1556
+308:2:1550
+309:2:1551
+310:2:1555
+311:2:1556
+312:2:1564
+313:2:1569
+314:2:1570
+315:2:1581
+316:2:1582
+317:2:1583
+318:2:1594
+319:2:1599
+320:2:1600
+321:2:1611
+322:2:1612
+323:2:1613
+324:2:1611
+325:2:1612
+326:2:1613
+327:2:1624
+328:2:1634
+329:2:1635
+330:0:2889
+331:2:1179
+332:0:2889
+333:2:2039
+334:0:2889
+335:2:2664
+336:2:2665
+337:2:2669
+338:2:2673
+339:2:2674
+340:2:2678
+341:2:2686
+342:2:2687
+343:2:2691
+344:2:2695
+345:2:2696
+346:2:2691
+347:2:2695
+348:2:2696
+349:2:2700
+350:2:2707
+351:2:2714
+352:2:2715
+353:2:2722
+354:2:2727
+355:2:2734
+356:2:2735
+357:2:2734
+358:2:2735
+359:2:2742
+360:2:2746
+361:0:2889
+362:2:2751
+363:0:2889
+364:2:2752
+365:0:2889
+366:2:2753
+367:0:2889
+368:2:2754
+369:0:2889
+370:1:143
+371:0:2889
+372:2:2755
+373:0:2889
+374:1:145
+375:0:2889
+376:2:2754
+377:0:2889
+378:1:44
+379:0:2889
+380:2:2755
+381:0:2889
+382:1:151
+383:1:152
+384:1:156
+385:1:157
+386:1:165
+387:1:166
+388:1:170
+389:1:171
+390:1:179
+391:1:184
+392:1:188
+393:1:189
+394:1:197
+395:1:198
+396:1:202
+397:1:203
+398:1:197
+399:1:198
+400:1:202
+401:1:203
+402:1:211
+403:1:223
+404:1:224
+405:1:228
+406:1:229
+407:1:230
+408:1:241
+409:1:246
+410:1:247
+411:1:258
+412:1:259
+413:1:260
+414:1:258
+415:1:259
+416:1:260
+417:1:271
+418:0:2889
+419:1:40
+420:0:2889
+421:1:41
+422:0:2889
+423:2:2754
+424:0:2889
+425:1:42
+426:0:2889
+427:2:2755
+428:0:2889
+429:1:143
+430:0:2889
+431:1:145
+432:0:2889
+433:2:2754
+434:0:2889
+435:1:44
+436:0:2889
+437:2:2755
+438:0:2889
+439:1:280
+440:1:281
+441:0:2889
+442:1:40
+443:0:2889
+444:1:41
+445:0:2889
+446:2:2754
+447:0:2889
+448:1:42
+449:0:2889
+450:2:2755
+451:0:2889
+452:1:143
+453:0:2889
+454:1:145
+455:0:2889
+456:2:2754
+457:0:2889
+458:1:44
+459:0:2889
+460:2:2755
+461:0:2889
+462:1:287
+463:1:288
+464:1:292
+465:1:293
+466:1:301
+467:1:302
+468:1:306
+469:1:307
+470:1:315
+471:1:320
+472:1:324
+473:1:325
+474:1:333
+475:1:334
+476:1:338
+477:1:339
+478:1:333
+479:1:334
+480:1:338
+481:1:339
+482:1:347
+483:1:359
+484:1:360
+485:1:364
+486:1:365
+487:1:366
+488:1:377
+489:1:382
+490:1:383
+491:1:394
+492:1:395
+493:1:396
+494:1:394
+495:1:395
+496:1:396
+497:1:407
+498:0:2889
+499:1:40
+500:0:2889
+501:1:41
+502:0:2889
+503:2:2754
+504:0:2889
+505:1:42
+506:0:2889
+507:2:2755
+508:0:2889
+509:1:51
+510:0:2889
+511:2:2754
+512:0:2889
+513:1:55
+514:1:56
+515:1:60
+516:1:64
+517:1:65
+518:1:69
+519:1:77
+520:1:78
+521:1:82
+522:1:86
+523:1:87
+524:1:82
+525:1:86
+526:1:87
+527:1:91
+528:1:98
+529:1:105
+530:1:106
+531:1:113
+532:1:118
+533:1:125
+534:1:126
+535:1:125
+536:1:126
+537:1:133
+538:1:137
+539:0:2889
+540:2:2755
+541:0:2889
+542:1:142
+543:0:2889
+544:2:2756
+545:0:2889
+546:2:2761
+547:0:2889
+548:2:2762
+549:0:2889
+550:2:2770
+551:2:2771
+552:2:2775
+553:2:2779
+554:2:2780
+555:2:2784
+556:2:2792
+557:2:2793
+558:2:2797
+559:2:2801
+560:2:2802
+561:2:2797
+562:2:2801
+563:2:2802
+564:2:2806
+565:2:2813
+566:2:2820
+567:2:2821
+568:2:2828
+569:2:2833
+570:2:2840
+571:2:2841
+572:2:2840
+573:2:2841
+574:2:2848
+575:2:2852
+576:0:2889
+577:2:2041
+578:2:2051
+579:0:2889
+580:2:1179
+581:0:2889
+582:2:2042
+583:2:2043
+584:0:2889
+585:2:1179
+586:0:2889
+587:2:2047
+588:0:2889
+589:2:2055
+590:0:2889
+591:2:1172
+592:0:2889
+593:2:1174
+594:0:2889
+595:2:1175
+596:0:2889
+597:2:1176
+598:0:2889
+599:2:1177
+600:0:2889
+601:2:1178
+602:0:2889
+603:2:1179
+604:0:2889
+605:2:1180
+606:2:1181
+607:2:1185
+608:2:1186
+609:2:1194
+610:2:1195
+611:2:1199
+612:2:1200
+613:2:1208
+614:2:1213
+615:2:1217
+616:2:1218
+617:2:1226
+618:2:1227
+619:2:1228
+620:2:1226
+621:2:1227
+622:2:1231
+623:2:1232
+624:2:1240
+625:2:1245
+626:2:1246
+627:2:1257
+628:2:1258
+629:2:1259
+630:2:1270
+631:2:1275
+632:2:1276
+633:2:1287
+634:2:1288
+635:2:1289
+636:2:1287
+637:2:1288
+638:2:1289
+639:2:1300
+640:2:1308
+641:0:2889
+642:2:1179
+643:0:2889
+644:2:1312
+645:2:1316
+646:2:1317
+647:2:1321
+648:2:1325
+649:2:1326
+650:2:1330
+651:2:1338
+652:2:1339
+653:2:1343
+654:2:1344
+655:2:1343
+656:2:1347
+657:2:1348
+658:2:1352
+659:0:2889
+660:2:1179
+661:0:2889
+662:2:1360
+663:2:1361
+664:2:1362
+665:0:2889
+666:2:1179
+667:0:2889
+668:2:1367
+669:0:2889
+670:2:2070
+671:2:2071
+672:2:2075
+673:2:2079
+674:2:2080
+675:2:2084
+676:2:2089
+677:2:2097
+678:2:2101
+679:2:2102
+680:2:2097
+681:2:2101
+682:2:2102
+683:2:2106
+684:2:2113
+685:2:2120
+686:2:2121
+687:2:2128
+688:2:2133
+689:2:2140
+690:2:2141
+691:2:2140
+692:2:2141
+693:2:2148
+694:2:2152
+695:0:2889
+696:2:2157
+697:0:2889
+698:2:2158
+699:0:2889
+700:2:2159
+701:0:2889
+702:2:2160
+703:0:2889
+704:1:143
+705:0:2889
+706:2:2161
+707:0:2889
+708:1:145
+709:0:2889
+710:2:2160
+711:0:2889
+712:1:44
+713:0:2889
+714:2:2161
+715:0:2889
+716:1:416
+717:1:417
+718:1:421
+719:1:422
+720:1:430
+721:1:431
+722:1:435
+723:1:436
+724:1:444
+725:1:449
+726:1:453
+727:1:454
+728:1:462
+729:1:463
+730:1:467
+731:1:468
+732:1:462
+733:1:463
+734:1:467
+735:1:468
+736:1:476
+737:1:481
+738:1:482
+739:1:493
+740:1:494
+741:1:495
+742:1:506
+743:1:518
+744:1:519
+745:1:523
+746:1:524
+747:1:525
+748:1:523
+749:1:524
+750:1:525
+751:1:536
+752:1:543
+753:0:2889
+754:1:40
+755:0:2889
+756:1:41
+757:0:2889
+758:2:2160
+759:0:2889
+760:1:42
+761:0:2889
+762:2:2161
+763:0:2889
+764:1:143
+765:0:2889
+766:1:145
+767:0:2889
+768:2:2160
+769:0:2889
+770:1:44
+771:0:2889
+772:2:2161
+773:0:2889
+774:1:681
+775:1:682
+776:1:686
+777:1:687
+778:1:695
+779:1:696
+780:1:697
+781:1:709
+782:1:714
+783:1:718
+784:1:719
+785:1:727
+786:1:728
+787:1:732
+788:1:733
+789:1:727
+790:1:728
+791:1:732
+792:1:733
+793:1:741
+794:1:746
+795:1:747
+796:1:758
+797:1:759
+798:1:760
+799:1:771
+800:1:783
+801:1:784
+802:1:788
+803:1:789
+804:1:790
+805:1:788
+806:1:789
+807:1:790
+808:1:801
+809:0:2889
+810:1:40
+811:0:2889
+812:1:41
+813:0:2889
+814:2:2160
+815:0:2889
+816:1:42
+817:0:2889
+818:2:2161
+819:0:2889
+820:1:51
+821:0:2889
+822:2:2160
+823:0:2889
+824:1:55
+825:1:56
+826:1:60
+827:1:64
+828:1:65
+829:1:69
+830:1:77
+831:1:78
+832:1:82
+833:1:86
+834:1:87
+835:1:82
+836:1:86
+837:1:87
+838:1:91
+839:1:98
+840:1:105
+841:1:106
+842:1:113
+843:1:118
+844:1:125
+845:1:126
+846:1:125
+847:1:126
+848:1:133
+849:1:137
+850:0:2889
+851:2:2161
+852:0:2889
+853:1:142
+854:0:2889
+855:2:2162
+856:0:2889
+857:2:2167
+858:0:2889
+859:2:2168
+860:0:2889
+861:2:2176
+862:2:2177
+863:2:2181
+864:2:2185
+865:2:2186
+866:2:2190
+867:2:2198
+868:2:2199
+869:2:2203
+870:2:2207
+871:2:2208
+872:2:2203
+873:2:2207
+874:2:2208
+875:2:2212
+876:2:2219
+877:2:2226
+878:2:2227
+879:2:2234
+880:2:2239
+881:2:2246
+882:2:2247
+883:2:2246
+884:2:2247
+885:2:2254
+886:2:2258
+887:0:2889
+888:2:1369
+889:2:2051
+890:0:2889
+891:2:1179
+892:0:2889
+893:2:1370
+894:0:2889
+895:2:1179
+896:0:2889
+897:2:1373
+898:2:1374
+899:2:1378
+900:2:1379
+901:2:1387
+902:2:1388
+903:2:1392
+904:2:1393
+905:2:1401
+906:2:1406
+907:2:1410
+908:2:1411
+909:2:1419
+910:2:1420
+911:2:1424
+912:2:1425
+913:2:1419
+914:2:1420
+915:2:1424
+916:2:1425
+917:2:1433
+918:2:1438
+919:2:1439
+920:2:1450
+921:2:1451
+922:2:1452
+923:2:1463
+924:2:1468
+925:2:1469
+926:2:1480
+927:2:1481
+928:2:1482
+929:2:1480
+930:2:1481
+931:2:1482
+932:2:1493
+933:2:1500
+934:0:2889
+935:2:1179
+936:0:2889
+937:2:1504
+938:2:1505
+939:2:1506
+940:2:1518
+941:2:1519
+942:2:1523
+943:2:1524
+944:2:1532
+945:2:1537
+946:2:1541
+947:2:1542
+948:2:1550
+949:2:1551
+950:2:1555
+951:2:1556
+952:2:1550
+953:2:1551
+954:2:1555
+955:2:1556
+956:2:1564
+957:2:1569
+958:2:1570
+959:2:1581
+960:2:1582
+961:2:1583
+962:2:1594
+963:2:1599
+964:2:1600
+965:2:1611
+966:2:1612
+967:2:1613
+968:2:1611
+969:2:1612
+970:2:1613
+971:2:1624
+972:2:1634
+973:2:1635
+974:0:2889
+975:2:1179
+976:0:2889
+977:2:2039
+978:0:2889
+979:2:2664
+980:2:2665
+981:2:2669
+982:2:2673
+983:2:2674
+984:2:2678
+985:2:2686
+986:2:2687
+987:2:2691
+988:2:2695
+989:2:2696
+990:2:2691
+991:2:2695
+992:2:2696
+993:2:2700
+994:2:2707
+995:2:2714
+996:2:2715
+997:2:2722
+998:2:2727
+999:2:2734
+1000:2:2735
+1001:2:2734
+1002:2:2735
+1003:2:2742
+1004:2:2746
+1005:0:2889
+1006:2:2751
+1007:0:2889
+1008:2:2752
+1009:0:2889
+1010:2:2753
+1011:0:2889
+1012:2:2754
+1013:0:2889
+1014:1:51
+1015:0:2889
+1016:2:2755
+1017:0:2889
+1018:1:55
+1019:1:56
+1020:1:60
+1021:1:64
+1022:1:65
+1023:1:69
+1024:1:77
+1025:1:78
+1026:1:82
+1027:1:86
+1028:1:87
+1029:1:82
+1030:1:86
+1031:1:87
+1032:1:91
+1033:1:98
+1034:1:105
+1035:1:106
+1036:1:113
+1037:1:118
+1038:1:125
+1039:1:126
+1040:1:125
+1041:1:126
+1042:1:133
+1043:1:137
+1044:0:2889
+1045:2:2754
+1046:0:2889
+1047:1:142
+1048:0:2889
+1049:2:2755
+1050:0:2889
+1051:2:2756
+1052:0:2889
+1053:2:2761
+1054:0:2889
+1055:2:2762
+1056:0:2889
+1057:2:2770
+1058:2:2771
+1059:2:2775
+1060:2:2779
+1061:2:2780
+1062:2:2784
+1063:2:2792
+1064:2:2793
+1065:2:2797
+1066:2:2801
+1067:2:2802
+1068:2:2797
+1069:2:2801
+1070:2:2802
+1071:2:2806
+1072:2:2813
+1073:2:2820
+1074:2:2821
+1075:2:2828
+1076:2:2833
+1077:2:2840
+1078:2:2841
+1079:2:2840
+1080:2:2841
+1081:2:2848
+1082:2:2852
+1083:0:2889
+1084:2:2041
+1085:2:2051
+1086:0:2889
+1087:2:1179
+1088:0:2889
+1089:2:2042
+1090:2:2043
+1091:0:2889
+1092:2:1179
+1093:0:2889
+1094:2:2047
+1095:0:2889
+1096:2:2055
+1097:0:2889
+1098:2:1172
+1099:0:2889
+1100:2:1174
+1101:0:2889
+1102:2:1175
+1103:0:2889
+1104:2:1176
+1105:0:2889
+1106:2:1177
+1107:0:2889
+1108:2:1178
+1109:0:2889
+1110:2:1179
+1111:0:2889
+1112:2:1180
+1113:2:1181
+1114:2:1185
+1115:2:1186
+1116:2:1194
+1117:2:1195
+1118:2:1199
+1119:2:1200
+1120:2:1208
+1121:2:1213
+1122:2:1217
+1123:2:1218
+1124:2:1226
+1125:2:1227
+1126:2:1231
+1127:2:1232
+1128:2:1226
+1129:2:1227
+1130:2:1228
+1131:2:1240
+1132:2:1245
+1133:2:1246
+1134:2:1257
+1135:2:1258
+1136:2:1259
+1137:2:1270
+1138:2:1275
+1139:2:1276
+1140:2:1287
+1141:2:1288
+1142:2:1289
+1143:2:1287
+1144:2:1288
+1145:2:1289
+1146:2:1300
+1147:2:1308
+1148:0:2889
+1149:2:1179
+1150:0:2889
+1151:1:143
+1152:0:2889
+1153:1:145
+1154:0:2889
+1155:1:44
+1156:0:2889
+1157:1:810
+1158:0:2889
+1159:1:1087
+1160:1:1094
+1161:1:1095
+1162:1:1102
+1163:1:1107
+1164:1:1114
+1165:1:1115
+1166:1:1114
+1167:1:1115
+1168:1:1122
+1169:1:1126
+1170:0:2889
+1171:2:1312
+1172:2:1316
+1173:2:1317
+1174:2:1321
+1175:2:1325
+1176:2:1326
+1177:2:1330
+1178:2:1338
+1179:2:1339
+1180:2:1343
+1181:2:1347
+1182:2:1348
+1183:2:1343
+1184:2:1344
+1185:2:1352
+1186:0:2889
+1187:2:1179
+1188:0:2889
+1189:2:1360
+1190:2:1361
+1191:2:1362
+1192:0:2889
+1193:2:1179
+1194:0:2889
+1195:2:1367
+1196:0:2889
+1197:2:2070
+1198:2:2071
+1199:2:2075
+1200:2:2079
+1201:2:2080
+1202:2:2084
+1203:2:2089
+1204:2:2097
+1205:2:2101
+1206:2:2102
+1207:2:2097
+1208:2:2101
+1209:2:2102
+1210:2:2106
+1211:2:2113
+1212:2:2120
+1213:2:2121
+1214:2:2128
+1215:2:2133
+1216:2:2140
+1217:2:2141
+1218:2:2140
+1219:2:2141
+1220:2:2148
+1221:2:2152
+1222:0:2889
+1223:2:2157
+1224:0:2889
+1225:2:2158
+1226:0:2889
+1227:2:2159
+1228:0:2889
+1229:2:2160
+1230:0:2889
+1231:1:812
+1232:1:813
+1233:0:2887
+1234:2:2161
+1235:0:2893
+1236:1:1005
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress.ltl b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.log
new file mode 100644 (file)
index 0000000..0fc4f7a
--- /dev/null
@@ -0,0 +1,305 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_reader.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1179)
+depth 7: Claim reached state 9 (line 1184)
+depth 136: Claim reached state 9 (line 1183)
+Depth=   12986 States=    1e+06 Transitions= 1.85e+08 Memory=   494.963        t=    294 R=   3e+03
+Depth=   12986 States=    2e+06 Transitions= 6.44e+08 Memory=   520.744        t= 1.06e+03 R=   2e+03
+Depth=   12986 States=    3e+06 Transitions= 1.24e+09 Memory=   545.842        t= 2.08e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=   12986 States=    4e+06 Transitions= 1.65e+09 Memory=   602.940        t= 2.76e+03 R=   1e+03
+Depth=   12986 States=    5e+06 Transitions=    2e+09 Memory=   629.111        t= 3.34e+03 R=   1e+03
+Depth=   12986 States=    6e+06 Transitions= 2.47e+09 Memory=   655.283        t= 4.12e+03 R=   1e+03
+Depth=   12986 States=    7e+06 Transitions= 2.94e+09 Memory=   681.260        t= 4.93e+03 R=   1e+03
+Depth=   12986 States=    8e+06 Transitions= 3.45e+09 Memory=   705.967        t= 5.79e+03 R=   1e+03
+Depth=   12986 States=    9e+06 Transitions= 3.84e+09 Memory=   732.529        t= 6.46e+03 R=   1e+03
+pan: resizing hashtable to -w24..  done
+Depth=   12986 States=    1e+07 Transitions= 4.23e+09 Memory=   882.404        t= 7.1e+03 R=   1e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 12986, errors: 0
+  5822478 states, stored (1.02932e+07 visited)
+4.3658924e+09 states, matched
+4.3761856e+09 transitions (= visited+matched)
+2.5547511e+10 atomic steps
+hash conflicts: 1.1937107e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+  644.119      equivalent memory usage for states (stored*(State-vector + overhead))
+  303.976      actual memory usage for states (compression: 47.19%)
+               state-vector as stored = 19 byte + 36 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+  889.631      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 28603 2179 2102 2 2 ]
+unreached in proctype urcu_reader
+       line 268, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 102, "(1)"
+       line 249, "pan.___", state 110, "(1)"
+       line 253, "pan.___", state 122, "(1)"
+       line 257, "pan.___", state 130, "(1)"
+       line 404, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 221, "(1)"
+       line 431, "pan.___", state 251, "(1)"
+       line 435, "pan.___", state 264, "(1)"
+       line 614, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 404, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 357, "(1)"
+       line 431, "pan.___", state 387, "(1)"
+       line 435, "pan.___", state 400, "(1)"
+       line 404, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 486, "(1)"
+       line 431, "pan.___", state 516, "(1)"
+       line 435, "pan.___", state 529, "(1)"
+       line 404, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 554, "(1)"
+       line 404, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 555, "else"
+       line 404, "pan.___", state 558, "(1)"
+       line 408, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 568, "(1)"
+       line 408, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 569, "else"
+       line 408, "pan.___", state 572, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 406, "pan.___", state 578, "((i<1))"
+       line 406, "pan.___", state 578, "((i>=1))"
+       line 413, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 586, "(1)"
+       line 413, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 587, "else"
+       line 413, "pan.___", state 590, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 417, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 600, "(1)"
+       line 417, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 601, "else"
+       line 417, "pan.___", state 604, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 415, "pan.___", state 610, "((i<2))"
+       line 415, "pan.___", state 610, "((i>=2))"
+       line 422, "pan.___", state 617, "(1)"
+       line 422, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 618, "else"
+       line 422, "pan.___", state 621, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 426, "pan.___", state 630, "(1)"
+       line 426, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 631, "else"
+       line 426, "pan.___", state 634, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 424, "pan.___", state 640, "((i<1))"
+       line 424, "pan.___", state 640, "((i>=1))"
+       line 431, "pan.___", state 647, "(1)"
+       line 431, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 648, "else"
+       line 431, "pan.___", state 651, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 435, "pan.___", state 660, "(1)"
+       line 435, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 661, "else"
+       line 435, "pan.___", state 664, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 433, "pan.___", state 670, "((i<2))"
+       line 433, "pan.___", state 670, "((i>=2))"
+       line 443, "pan.___", state 674, "(1)"
+       line 443, "pan.___", state 674, "(1)"
+       line 614, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 614, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 614, "pan.___", state 679, "(1)"
+       line 404, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 751, "(1)"
+       line 431, "pan.___", state 781, "(1)"
+       line 435, "pan.___", state 794, "(1)"
+       line 404, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 886, "(1)"
+       line 431, "pan.___", state 916, "(1)"
+       line 435, "pan.___", state 929, "(1)"
+       line 404, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1015, "(1)"
+       line 431, "pan.___", state 1045, "(1)"
+       line 435, "pan.___", state 1058, "(1)"
+       line 245, "pan.___", state 1091, "(1)"
+       line 253, "pan.___", state 1111, "(1)"
+       line 257, "pan.___", state 1119, "(1)"
+       line 748, "pan.___", state 1136, "-end-"
+       (91 of 1136 states)
+unreached in proctype urcu_writer
+       line 404, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 422, "pan.___", state 110, "(1)"
+       line 426, "pan.___", state 123, "(1)"
+       line 431, "pan.___", state 140, "(1)"
+       line 268, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 404, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 303, "(1)"
+       line 426, "pan.___", state 316, "(1)"
+       line 431, "pan.___", state 333, "(1)"
+       line 435, "pan.___", state 346, "(1)"
+       line 408, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 447, "(1)"
+       line 431, "pan.___", state 464, "(1)"
+       line 435, "pan.___", state 477, "(1)"
+       line 408, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 586, "(1)"
+       line 431, "pan.___", state 603, "(1)"
+       line 435, "pan.___", state 616, "(1)"
+       line 408, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 715, "(1)"
+       line 431, "pan.___", state 732, "(1)"
+       line 435, "pan.___", state 745, "(1)"
+       line 408, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 846, "(1)"
+       line 431, "pan.___", state 863, "(1)"
+       line 435, "pan.___", state 876, "(1)"
+       line 268, "pan.___", state 926, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 935, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 948, "cache_dirty_rcu_ptr = 0"
+       line 245, "pan.___", state 973, "(1)"
+       line 249, "pan.___", state 981, "(1)"
+       line 253, "pan.___", state 993, "(1)"
+       line 257, "pan.___", state 1001, "(1)"
+       line 268, "pan.___", state 1032, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1041, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1054, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1063, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1079, "(1)"
+       line 249, "pan.___", state 1087, "(1)"
+       line 253, "pan.___", state 1099, "(1)"
+       line 257, "pan.___", state 1107, "(1)"
+       line 268, "pan.___", state 1128, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1137, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1152, "(1)"
+       line 280, "pan.___", state 1159, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1175, "(1)"
+       line 249, "pan.___", state 1183, "(1)"
+       line 253, "pan.___", state 1195, "(1)"
+       line 257, "pan.___", state 1203, "(1)"
+       line 268, "pan.___", state 1234, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1243, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1256, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1265, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1281, "(1)"
+       line 249, "pan.___", state 1289, "(1)"
+       line 253, "pan.___", state 1301, "(1)"
+       line 257, "pan.___", state 1309, "(1)"
+       line 272, "pan.___", state 1335, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1348, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1357, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1373, "(1)"
+       line 249, "pan.___", state 1381, "(1)"
+       line 253, "pan.___", state 1393, "(1)"
+       line 257, "pan.___", state 1401, "(1)"
+       line 268, "pan.___", state 1432, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1441, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1454, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1463, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1479, "(1)"
+       line 249, "pan.___", state 1487, "(1)"
+       line 253, "pan.___", state 1499, "(1)"
+       line 257, "pan.___", state 1507, "(1)"
+       line 272, "pan.___", state 1533, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1546, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1555, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1571, "(1)"
+       line 249, "pan.___", state 1579, "(1)"
+       line 253, "pan.___", state 1591, "(1)"
+       line 257, "pan.___", state 1599, "(1)"
+       line 268, "pan.___", state 1630, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1639, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1652, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1661, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1677, "(1)"
+       line 249, "pan.___", state 1685, "(1)"
+       line 253, "pan.___", state 1697, "(1)"
+       line 257, "pan.___", state 1705, "(1)"
+       line 272, "pan.___", state 1731, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1744, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1753, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1769, "(1)"
+       line 249, "pan.___", state 1777, "(1)"
+       line 253, "pan.___", state 1789, "(1)"
+       line 257, "pan.___", state 1797, "(1)"
+       line 268, "pan.___", state 1828, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1837, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1850, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1859, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1875, "(1)"
+       line 249, "pan.___", state 1883, "(1)"
+       line 253, "pan.___", state 1895, "(1)"
+       line 257, "pan.___", state 1903, "(1)"
+       line 1123, "pan.___", state 1919, "-end-"
+       (118 of 1919 states)
+unreached in proctype :init:
+       (0 of 26 states)
+unreached in proctype :never:
+       line 1186, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 7.33e+03 seconds
+pan: rate 1403.7352 states/second
+pan: avg transition delay 1.6756e-06 usec
+cp .input.spin urcu_progress_reader.spin.input
+cp .input.spin.trail urcu_progress_reader.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_reader.spin.input
new file mode 100644 (file)
index 0000000..1e7da85
--- /dev/null
@@ -0,0 +1,1157 @@
+#define READER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active inhibits instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
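+/*
+ * Illustrative usage sketch (not part of the model; proc_state, TOKEN_A and
+ * TOKEN_B are hypothetical names). An instruction B depending on a previous
+ * instruction A is typically guarded as :
+ *
+ *   :: CONSUME_TOKENS(proc_state, TOKEN_A, TOKEN_B) ->
+ *          ... perform B ...
+ *          PRODUCE_TOKENS(proc_state, TOKEN_B);
+ *
+ * B may only fire once A's token is present and B's own token is still
+ * absent, which both enforces the dependency and keeps B from executing
+ * twice in the same pass. CLEAR_TOKENS resets tokens, e.g. at the end of a
+ * pass or to re-enable a busy-waiting loop.
+ */
+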
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it must be kept when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. Cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Non-deterministically propagate the cache line to memory (write, if dirty)
+ * or refresh it from memory (read, if clean), or do nothing.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
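+
+/*
+ * Illustrative note (not part of the verified model): the life cycle of a
+ * cached variable x within one process is roughly:
+ *
+ *   WRITE_CACHED_VAR(x, v);            updates cached_x and marks it dirty
+ *   RANDOM_CACHE_WRITE_TO_MEM(x, id);  may (or may not) flush it to mem_x
+ *   smp_wmb(i) / smp_mb(i);            forces the flush of every dirty line
+ *
+ * On the read side, RANDOM_CACHE_READ_FROM_MEM may refresh a clean cached_x
+ * from mem_x at any time, while smp_rmb(i) / smp_mb(i) forces the refresh.
+ * This is how out-of-order memory effects are modeled.
+ */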
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
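+
+/*
+ * Illustrative note (not part of the verified model): in REMOTE_BARRIERS
+ * mode the writer-side barrier is a request/acknowledge protocol, roughly:
+ *
+ *   writer (smp_mb_send):  smp_mb(); set reader_barrier[i];
+ *                          busy-wait until the reader clears it; smp_mb();
+ *   reader (smp_mb_recv):  when reader_barrier[get_readerid()] is set,
+ *                          execute smp_mb() and clear the flag, or ignore it.
+ *
+ * This models read-side barriers reduced to compiler barriers, with the
+ * writer triggering the real memory barriers remotely (signal/IPI).
+ */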
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
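+
+/*
+ * Illustrative note (not part of the verified model): the reader token bits
+ * roughly follow the read-side critical section in program order:
+ *
+ *   rcu_read_lock()    -> READ_LOCK_BASE .. READ_LOCK_OUT
+ *   first barrier      -> READ_PROC_FIRST_MB
+ *   pointer/data reads -> READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+ *   second barrier     -> READ_PROC_SECOND_MB
+ *   rcu_read_unlock()  -> READ_UNLOCK_BASE .. READ_UNLOCK_OUT
+ */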
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * we perform checks whether the data entry read is poisoned, it
+                * is OK if we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..ed8fbc3
--- /dev/null
@@ -0,0 +1,290 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1179)
+depth 7: Claim reached state 9 (line 1184)
+depth 46: Claim reached state 9 (line 1183)
+Depth=    3817 States=    1e+06 Transitions= 2.94e+08 Memory=   492.912        t=    480 R=   2e+03
+Depth=    3900 States=    2e+06 Transitions= 6.63e+08 Memory=   519.572        t= 1.09e+03 R=   2e+03
+Depth=    3900 States=    3e+06 Transitions= 1.33e+09 Memory=   543.986        t= 2.23e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=    3900 States=    4e+06 Transitions= 1.71e+09 Memory=   601.279        t= 2.86e+03 R=   1e+03
+Depth=    3900 States=    5e+06 Transitions= 2.06e+09 Memory=   627.647        t= 3.45e+03 R=   1e+03
+Depth=    3900 States=    6e+06 Transitions= 2.49e+09 Memory=   653.818        t= 4.18e+03 R=   1e+03
+Depth=    3900 States=    7e+06 Transitions= 3.14e+09 Memory=   678.135        t= 5.28e+03 R=   1e+03
+Depth=    3900 States=    8e+06 Transitions= 3.52e+09 Memory=   704.404        t= 5.93e+03 R=   1e+03
+Depth=    3900 States=    9e+06 Transitions= 3.88e+09 Memory=   730.869        t= 6.53e+03 R=   1e+03
+pan: resizing hashtable to -w24..  done
+Depth=    3900 States=    1e+07 Transitions= 4.34e+09 Memory=   880.451        t= 7.29e+03 R=   1e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 3900, errors: 0
+  5638524 states, stored (1.00251e+07 visited)
+4.3347674e+09 states, matched
+4.3447924e+09 transitions (= visited+matched)
+2.5387604e+10 atomic steps
+hash conflicts: 1.1627355e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+  623.769      equivalent memory usage for states (stored*(State-vector + overhead))
+  295.411      actual memory usage for states (compression: 47.36%)
+               state-vector as stored = 19 byte + 36 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+  881.037      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 26786 2128 1995 2 2 ]
+unreached in proctype urcu_reader
+       line 268, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 102, "(1)"
+       line 249, "pan.___", state 110, "(1)"
+       line 253, "pan.___", state 122, "(1)"
+       line 257, "pan.___", state 130, "(1)"
+       line 404, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 221, "(1)"
+       line 431, "pan.___", state 251, "(1)"
+       line 435, "pan.___", state 264, "(1)"
+       line 614, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 404, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 357, "(1)"
+       line 431, "pan.___", state 387, "(1)"
+       line 435, "pan.___", state 400, "(1)"
+       line 404, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 486, "(1)"
+       line 431, "pan.___", state 516, "(1)"
+       line 435, "pan.___", state 529, "(1)"
+       line 404, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 404, "pan.___", state 554, "(1)"
+       line 404, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 404, "pan.___", state 555, "else"
+       line 404, "pan.___", state 558, "(1)"
+       line 408, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 408, "pan.___", state 568, "(1)"
+       line 408, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 408, "pan.___", state 569, "else"
+       line 408, "pan.___", state 572, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 408, "pan.___", state 573, "(1)"
+       line 406, "pan.___", state 578, "((i<1))"
+       line 406, "pan.___", state 578, "((i>=1))"
+       line 413, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 413, "pan.___", state 586, "(1)"
+       line 413, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 413, "pan.___", state 587, "else"
+       line 413, "pan.___", state 590, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 413, "pan.___", state 591, "(1)"
+       line 417, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 417, "pan.___", state 600, "(1)"
+       line 417, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 417, "pan.___", state 601, "else"
+       line 417, "pan.___", state 604, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 417, "pan.___", state 605, "(1)"
+       line 415, "pan.___", state 610, "((i<2))"
+       line 415, "pan.___", state 610, "((i>=2))"
+       line 422, "pan.___", state 617, "(1)"
+       line 422, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 422, "pan.___", state 618, "else"
+       line 422, "pan.___", state 621, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 422, "pan.___", state 622, "(1)"
+       line 426, "pan.___", state 630, "(1)"
+       line 426, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 426, "pan.___", state 631, "else"
+       line 426, "pan.___", state 634, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 426, "pan.___", state 635, "(1)"
+       line 424, "pan.___", state 640, "((i<1))"
+       line 424, "pan.___", state 640, "((i>=1))"
+       line 431, "pan.___", state 647, "(1)"
+       line 431, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 431, "pan.___", state 648, "else"
+       line 431, "pan.___", state 651, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 431, "pan.___", state 652, "(1)"
+       line 435, "pan.___", state 660, "(1)"
+       line 435, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 435, "pan.___", state 661, "else"
+       line 435, "pan.___", state 664, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 435, "pan.___", state 665, "(1)"
+       line 433, "pan.___", state 670, "((i<2))"
+       line 433, "pan.___", state 670, "((i>=2))"
+       line 443, "pan.___", state 674, "(1)"
+       line 443, "pan.___", state 674, "(1)"
+       line 614, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 614, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 614, "pan.___", state 679, "(1)"
+       line 404, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 751, "(1)"
+       line 431, "pan.___", state 781, "(1)"
+       line 435, "pan.___", state 794, "(1)"
+       line 404, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 886, "(1)"
+       line 431, "pan.___", state 916, "(1)"
+       line 435, "pan.___", state 929, "(1)"
+       line 404, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 413, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 1015, "(1)"
+       line 431, "pan.___", state 1045, "(1)"
+       line 435, "pan.___", state 1058, "(1)"
+       line 245, "pan.___", state 1091, "(1)"
+       line 253, "pan.___", state 1111, "(1)"
+       line 257, "pan.___", state 1119, "(1)"
+       line 748, "pan.___", state 1136, "-end-"
+       (91 of 1136 states)
+unreached in proctype urcu_writer
+       line 404, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 422, "pan.___", state 110, "(1)"
+       line 426, "pan.___", state 123, "(1)"
+       line 431, "pan.___", state 140, "(1)"
+       line 268, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 404, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 422, "pan.___", state 303, "(1)"
+       line 426, "pan.___", state 316, "(1)"
+       line 431, "pan.___", state 333, "(1)"
+       line 435, "pan.___", state 346, "(1)"
+       line 408, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 447, "(1)"
+       line 431, "pan.___", state 464, "(1)"
+       line 435, "pan.___", state 477, "(1)"
+       line 408, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 586, "(1)"
+       line 431, "pan.___", state 603, "(1)"
+       line 435, "pan.___", state 616, "(1)"
+       line 408, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 715, "(1)"
+       line 431, "pan.___", state 732, "(1)"
+       line 435, "pan.___", state 745, "(1)"
+       line 408, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 413, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 426, "pan.___", state 846, "(1)"
+       line 431, "pan.___", state 863, "(1)"
+       line 435, "pan.___", state 876, "(1)"
+       line 268, "pan.___", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 955, "(1)"
+       line 280, "pan.___", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 978, "(1)"
+       line 249, "pan.___", state 986, "(1)"
+       line 253, "pan.___", state 998, "(1)"
+       line 257, "pan.___", state 1006, "(1)"
+       line 268, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1084, "(1)"
+       line 249, "pan.___", state 1092, "(1)"
+       line 253, "pan.___", state 1104, "(1)"
+       line 257, "pan.___", state 1112, "(1)"
+       line 272, "pan.___", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1176, "(1)"
+       line 249, "pan.___", state 1184, "(1)"
+       line 253, "pan.___", state 1196, "(1)"
+       line 257, "pan.___", state 1204, "(1)"
+       line 268, "pan.___", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1282, "(1)"
+       line 249, "pan.___", state 1290, "(1)"
+       line 253, "pan.___", state 1302, "(1)"
+       line 257, "pan.___", state 1310, "(1)"
+       line 272, "pan.___", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1374, "(1)"
+       line 249, "pan.___", state 1382, "(1)"
+       line 253, "pan.___", state 1394, "(1)"
+       line 257, "pan.___", state 1402, "(1)"
+       line 268, "pan.___", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1480, "(1)"
+       line 249, "pan.___", state 1488, "(1)"
+       line 253, "pan.___", state 1500, "(1)"
+       line 257, "pan.___", state 1508, "(1)"
+       line 272, "pan.___", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1572, "(1)"
+       line 249, "pan.___", state 1580, "(1)"
+       line 253, "pan.___", state 1592, "(1)"
+       line 257, "pan.___", state 1600, "(1)"
+       line 268, "pan.___", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1678, "(1)"
+       line 249, "pan.___", state 1686, "(1)"
+       line 253, "pan.___", state 1698, "(1)"
+       line 257, "pan.___", state 1706, "(1)"
+       line 1123, "pan.___", state 1722, "-end-"
+       (103 of 1722 states)
+unreached in proctype :init:
+       (0 of 26 states)
+unreached in proctype :never:
+       line 1186, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 7.3e+03 seconds
+pan: rate 1373.8126 states/second
+pan: avg transition delay 1.6795e-06 usec
+cp .input.spin urcu_progress_writer.spin.input
+cp .input.spin.trail urcu_progress_writer.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer.spin.input
new file mode 100644 (file)
index 0000000..4f98f45
--- /dev/null
@@ -0,0 +1,1157 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
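+/*
+ * Illustrative sketch (not part of the verified model): how these token
+ * macros encode a simple RAW dependency between two hypothetical statements.
+ * "flow", "x", "y", TOKEN_A and TOKEN_B are made-up names.
+ *
+ *     :: CONSUME_TOKENS(flow, 0, TOKEN_A) ->
+ *             tmp = READ_CACHED_VAR(x);               statement A
+ *             PRODUCE_TOKENS(flow, TOKEN_A);
+ *     :: CONSUME_TOKENS(flow, TOKEN_A, TOKEN_B) ->
+ *             WRITE_CACHED_VAR(y, tmp);               statement B (RAW on tmp)
+ *             PRODUCE_TOKENS(flow, TOKEN_B);
+ *
+ * Statement B becomes eligible only once TOKEN_A has been produced, and runs
+ * at most once because its own token (TOKEN_B) then inhibits it; statement A
+ * is likewise inhibited by TOKEN_A after it executes.
+ */
+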
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
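+/*
+ * Small illustrative examples of the dependency types above (hypothetical
+ * statements, not taken from the model):
+ *
+ *     RAW:  x = 1;  y = x;    (the second statement reads the value the first wrote)
+ *     WAR:  y = x;  x = 1;    (the write must not overtake the earlier read)
+ *     WAW:  x = 1;  x = 2;    (the final value must be that of the second write)
+ */
+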
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
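+/*
+ * Illustrative sketch (not part of the model) of how these macros emulate a
+ * weakly-ordered memory.  "flag" is a hypothetical variable name.
+ *
+ *     WRITE_CACHED_VAR(flag, 1);                      cached_flag = 1, marked dirty
+ *     RANDOM_CACHE_WRITE_TO_MEM(flag, get_pid());     may or may not flush to mem_flag
+ *     CACHE_WRITE_TO_MEM(flag, get_pid());            forced flush, e.g. from smp_wmb()
+ *
+ * Until a flush happens, other processes keep reading their own (possibly
+ * stale) cached copy, which is only refreshed from mem_flag by
+ * CACHE_READ_FROM_MEM (e.g. from smp_rmb()).
+ */
+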
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
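+/*
+ * Informal summary: the token bits above roughly encode the program order of
+ * one reader iteration, i.e. the modelled equivalent of
+ *
+ *     rcu_read_lock()   (READ_LOCK_*)
+ *     smp_mb()          (READ_PROC_FIRST_MB)
+ *     read rcu_ptr      (READ_PROC_READ_GEN)
+ *     read rcu_data[]   (READ_PROC_ACCESS_GEN)
+ *     smp_mb()          (READ_PROC_SECOND_MB)
+ *     rcu_read_unlock() (READ_UNLOCK_*)
+ *
+ * Out-of-order execution is then explored by letting these steps fire in any
+ * order permitted by the token dependencies declared in urcu_one_read().
+ */
+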
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution onto the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it's blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
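+/*
+ * Informal summary: the writer tokens above roughly encode one update cycle:
+ * write the new data (WRITE_DATA), smp_wmb (WRITE_PROC_WMB), publish the new
+ * pointer (WRITE_XCHG_PTR), smp_mb (WRITE_PROC_FIRST_MB), the two
+ * grace-period flips with their busy-waits
+ * (WRITE_PROC_{FIRST,SECOND}_{READ,WRITE}_GP / _WAIT), a final smp_mb
+ * (WRITE_PROC_SECOND_MB), and finally poisoning of the old data (WRITE_FREE).
+ * The token dependencies in the writer body below determine which reorderings
+ * of these steps are explored.
+ */
+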
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it's ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.log b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.log
new file mode 100644 (file)
index 0000000..b95b25e
--- /dev/null
@@ -0,0 +1,315 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer_error.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1180)
+depth 7: Claim reached state 9 (line 1185)
+depth 46: Claim reached state 9 (line 1184)
+pan: acceptance cycle (at depth 3798)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 3956, errors: 1
+   139740 states, stored (233412 visited)
+ 43234612 states, matched
+ 43468024 transitions (= visited+matched)
+2.3201732e+08 atomic steps
+hash conflicts:    937081 (resolved)
+
+Stats on memory usage (in Megabytes):
+   15.459      equivalent memory usage for states (stored*(State-vector + overhead))
+    7.007      actual memory usage for states (compression: 45.33%)
+               state-vector as stored = 17 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  472.697      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 1001 532 228 2 2 ]
+unreached in proctype urcu_reader
+       line 269, "pan.___", state 55, "cache_dirty_urcu_gp_ctr = 0"
+       line 277, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 86, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 102, "(1)"
+       line 250, "pan.___", state 110, "(1)"
+       line 254, "pan.___", state 122, "(1)"
+       line 258, "pan.___", state 130, "(1)"
+       line 405, "pan.___", state 156, "cache_dirty_urcu_gp_ctr = 0"
+       line 414, "pan.___", state 188, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 202, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 221, "(1)"
+       line 432, "pan.___", state 251, "(1)"
+       line 436, "pan.___", state 264, "(1)"
+       line 615, "pan.___", state 285, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 405, "pan.___", state 292, "cache_dirty_urcu_gp_ctr = 0"
+       line 414, "pan.___", state 324, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 338, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 357, "(1)"
+       line 432, "pan.___", state 387, "(1)"
+       line 436, "pan.___", state 400, "(1)"
+       line 405, "pan.___", state 421, "cache_dirty_urcu_gp_ctr = 0"
+       line 414, "pan.___", state 453, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 467, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 486, "(1)"
+       line 432, "pan.___", state 516, "(1)"
+       line 436, "pan.___", state 529, "(1)"
+       line 405, "pan.___", state 552, "cache_dirty_urcu_gp_ctr = 0"
+       line 405, "pan.___", state 554, "(1)"
+       line 405, "pan.___", state 555, "(cache_dirty_urcu_gp_ctr)"
+       line 405, "pan.___", state 555, "else"
+       line 405, "pan.___", state 558, "(1)"
+       line 409, "pan.___", state 566, "cache_dirty_urcu_active_readers = 0"
+       line 409, "pan.___", state 568, "(1)"
+       line 409, "pan.___", state 569, "(cache_dirty_urcu_active_readers)"
+       line 409, "pan.___", state 569, "else"
+       line 409, "pan.___", state 572, "(1)"
+       line 409, "pan.___", state 573, "(1)"
+       line 409, "pan.___", state 573, "(1)"
+       line 407, "pan.___", state 578, "((i<1))"
+       line 407, "pan.___", state 578, "((i>=1))"
+       line 414, "pan.___", state 584, "cache_dirty_rcu_ptr = 0"
+       line 414, "pan.___", state 586, "(1)"
+       line 414, "pan.___", state 587, "(cache_dirty_rcu_ptr)"
+       line 414, "pan.___", state 587, "else"
+       line 414, "pan.___", state 590, "(1)"
+       line 414, "pan.___", state 591, "(1)"
+       line 414, "pan.___", state 591, "(1)"
+       line 418, "pan.___", state 598, "cache_dirty_rcu_data[i] = 0"
+       line 418, "pan.___", state 600, "(1)"
+       line 418, "pan.___", state 601, "(cache_dirty_rcu_data[i])"
+       line 418, "pan.___", state 601, "else"
+       line 418, "pan.___", state 604, "(1)"
+       line 418, "pan.___", state 605, "(1)"
+       line 418, "pan.___", state 605, "(1)"
+       line 416, "pan.___", state 610, "((i<2))"
+       line 416, "pan.___", state 610, "((i>=2))"
+       line 423, "pan.___", state 617, "(1)"
+       line 423, "pan.___", state 618, "(!(cache_dirty_urcu_gp_ctr))"
+       line 423, "pan.___", state 618, "else"
+       line 423, "pan.___", state 621, "(1)"
+       line 423, "pan.___", state 622, "(1)"
+       line 423, "pan.___", state 622, "(1)"
+       line 427, "pan.___", state 630, "(1)"
+       line 427, "pan.___", state 631, "(!(cache_dirty_urcu_active_readers))"
+       line 427, "pan.___", state 631, "else"
+       line 427, "pan.___", state 634, "(1)"
+       line 427, "pan.___", state 635, "(1)"
+       line 427, "pan.___", state 635, "(1)"
+       line 425, "pan.___", state 640, "((i<1))"
+       line 425, "pan.___", state 640, "((i>=1))"
+       line 432, "pan.___", state 647, "(1)"
+       line 432, "pan.___", state 648, "(!(cache_dirty_rcu_ptr))"
+       line 432, "pan.___", state 648, "else"
+       line 432, "pan.___", state 651, "(1)"
+       line 432, "pan.___", state 652, "(1)"
+       line 432, "pan.___", state 652, "(1)"
+       line 436, "pan.___", state 660, "(1)"
+       line 436, "pan.___", state 661, "(!(cache_dirty_rcu_data[i]))"
+       line 436, "pan.___", state 661, "else"
+       line 436, "pan.___", state 664, "(1)"
+       line 436, "pan.___", state 665, "(1)"
+       line 436, "pan.___", state 665, "(1)"
+       line 434, "pan.___", state 670, "((i<2))"
+       line 434, "pan.___", state 670, "((i>=2))"
+       line 444, "pan.___", state 674, "(1)"
+       line 444, "pan.___", state 674, "(1)"
+       line 615, "pan.___", state 677, "cached_urcu_active_readers = (tmp+1)"
+       line 615, "pan.___", state 678, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 615, "pan.___", state 679, "(1)"
+       line 405, "pan.___", state 686, "cache_dirty_urcu_gp_ctr = 0"
+       line 414, "pan.___", state 718, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 732, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 751, "(1)"
+       line 432, "pan.___", state 781, "(1)"
+       line 436, "pan.___", state 794, "(1)"
+       line 405, "pan.___", state 821, "cache_dirty_urcu_gp_ctr = 0"
+       line 414, "pan.___", state 853, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 867, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 886, "(1)"
+       line 432, "pan.___", state 916, "(1)"
+       line 436, "pan.___", state 929, "(1)"
+       line 405, "pan.___", state 950, "cache_dirty_urcu_gp_ctr = 0"
+       line 414, "pan.___", state 982, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 996, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 1015, "(1)"
+       line 432, "pan.___", state 1045, "(1)"
+       line 436, "pan.___", state 1058, "(1)"
+       line 246, "pan.___", state 1091, "(1)"
+       line 254, "pan.___", state 1111, "(1)"
+       line 258, "pan.___", state 1119, "(1)"
+       line 749, "pan.___", state 1136, "-end-"
+       (91 of 1136 states)
+unreached in proctype urcu_writer
+       line 405, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 409, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 423, "pan.___", state 110, "(1)"
+       line 427, "pan.___", state 123, "(1)"
+       line 432, "pan.___", state 140, "(1)"
+       line 269, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 405, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 409, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 423, "pan.___", state 303, "(1)"
+       line 427, "pan.___", state 316, "(1)"
+       line 432, "pan.___", state 333, "(1)"
+       line 436, "pan.___", state 346, "(1)"
+       line 409, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 427, "pan.___", state 447, "(1)"
+       line 432, "pan.___", state 464, "(1)"
+       line 436, "pan.___", state 477, "(1)"
+       line 405, "pan.___", state 504, "cache_dirty_urcu_gp_ctr = 0"
+       line 405, "pan.___", state 506, "(1)"
+       line 405, "pan.___", state 507, "(cache_dirty_urcu_gp_ctr)"
+       line 405, "pan.___", state 507, "else"
+       line 405, "pan.___", state 510, "(1)"
+       line 409, "pan.___", state 518, "cache_dirty_urcu_active_readers = 0"
+       line 409, "pan.___", state 520, "(1)"
+       line 409, "pan.___", state 521, "(cache_dirty_urcu_active_readers)"
+       line 409, "pan.___", state 521, "else"
+       line 409, "pan.___", state 524, "(1)"
+       line 409, "pan.___", state 525, "(1)"
+       line 409, "pan.___", state 525, "(1)"
+       line 407, "pan.___", state 530, "((i<1))"
+       line 407, "pan.___", state 530, "((i>=1))"
+       line 414, "pan.___", state 536, "cache_dirty_rcu_ptr = 0"
+       line 414, "pan.___", state 538, "(1)"
+       line 414, "pan.___", state 539, "(cache_dirty_rcu_ptr)"
+       line 414, "pan.___", state 539, "else"
+       line 414, "pan.___", state 542, "(1)"
+       line 414, "pan.___", state 543, "(1)"
+       line 414, "pan.___", state 543, "(1)"
+       line 418, "pan.___", state 550, "cache_dirty_rcu_data[i] = 0"
+       line 418, "pan.___", state 552, "(1)"
+       line 418, "pan.___", state 553, "(cache_dirty_rcu_data[i])"
+       line 418, "pan.___", state 553, "else"
+       line 418, "pan.___", state 556, "(1)"
+       line 418, "pan.___", state 557, "(1)"
+       line 418, "pan.___", state 557, "(1)"
+       line 416, "pan.___", state 562, "((i<2))"
+       line 416, "pan.___", state 562, "((i>=2))"
+       line 423, "pan.___", state 569, "(1)"
+       line 423, "pan.___", state 570, "(!(cache_dirty_urcu_gp_ctr))"
+       line 423, "pan.___", state 570, "else"
+       line 423, "pan.___", state 573, "(1)"
+       line 423, "pan.___", state 574, "(1)"
+       line 423, "pan.___", state 574, "(1)"
+       line 427, "pan.___", state 582, "(1)"
+       line 427, "pan.___", state 583, "(!(cache_dirty_urcu_active_readers))"
+       line 427, "pan.___", state 583, "else"
+       line 427, "pan.___", state 586, "(1)"
+       line 427, "pan.___", state 587, "(1)"
+       line 427, "pan.___", state 587, "(1)"
+       line 425, "pan.___", state 592, "((i<1))"
+       line 425, "pan.___", state 592, "((i>=1))"
+       line 432, "pan.___", state 599, "(1)"
+       line 432, "pan.___", state 600, "(!(cache_dirty_rcu_ptr))"
+       line 432, "pan.___", state 600, "else"
+       line 432, "pan.___", state 603, "(1)"
+       line 432, "pan.___", state 604, "(1)"
+       line 432, "pan.___", state 604, "(1)"
+       line 436, "pan.___", state 612, "(1)"
+       line 436, "pan.___", state 613, "(!(cache_dirty_rcu_data[i]))"
+       line 436, "pan.___", state 613, "else"
+       line 436, "pan.___", state 616, "(1)"
+       line 436, "pan.___", state 617, "(1)"
+       line 436, "pan.___", state 617, "(1)"
+       line 444, "pan.___", state 626, "(1)"
+       line 444, "pan.___", state 626, "(1)"
+       line 409, "pan.___", state 646, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 664, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 678, "cache_dirty_rcu_data[i] = 0"
+       line 427, "pan.___", state 710, "(1)"
+       line 432, "pan.___", state 727, "(1)"
+       line 436, "pan.___", state 740, "(1)"
+       line 409, "pan.___", state 775, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 793, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 807, "cache_dirty_rcu_data[i] = 0"
+       line 427, "pan.___", state 839, "(1)"
+       line 432, "pan.___", state 856, "(1)"
+       line 436, "pan.___", state 869, "(1)"
+       line 409, "pan.___", state 906, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 924, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 938, "cache_dirty_rcu_data[i] = 0"
+       line 427, "pan.___", state 970, "(1)"
+       line 432, "pan.___", state 987, "(1)"
+       line 436, "pan.___", state 1000, "(1)"
+       line 409, "pan.___", state 1040, "cache_dirty_urcu_active_readers = 0"
+       line 414, "pan.___", state 1058, "cache_dirty_rcu_ptr = 0"
+       line 418, "pan.___", state 1072, "cache_dirty_rcu_data[i] = 0"
+       line 427, "pan.___", state 1104, "(1)"
+       line 432, "pan.___", state 1121, "(1)"
+       line 436, "pan.___", state 1134, "(1)"
+       line 269, "pan.___", state 1180, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 1189, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1204, "(1)"
+       line 281, "pan.___", state 1211, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1227, "(1)"
+       line 250, "pan.___", state 1235, "(1)"
+       line 254, "pan.___", state 1247, "(1)"
+       line 258, "pan.___", state 1255, "(1)"
+       line 269, "pan.___", state 1286, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 1295, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1308, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 1317, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1333, "(1)"
+       line 250, "pan.___", state 1341, "(1)"
+       line 254, "pan.___", state 1353, "(1)"
+       line 258, "pan.___", state 1361, "(1)"
+       line 273, "pan.___", state 1387, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1400, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 1409, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1425, "(1)"
+       line 250, "pan.___", state 1433, "(1)"
+       line 254, "pan.___", state 1445, "(1)"
+       line 258, "pan.___", state 1453, "(1)"
+       line 269, "pan.___", state 1484, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 1493, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1506, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 1515, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1531, "(1)"
+       line 250, "pan.___", state 1539, "(1)"
+       line 254, "pan.___", state 1551, "(1)"
+       line 258, "pan.___", state 1559, "(1)"
+       line 1124, "pan.___", state 1575, "-end-"
+       (118 of 1575 states)
+unreached in proctype :init:
+       (0 of 26 states)
+unreached in proctype :never:
+       line 1187, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 67.7 seconds
+pan: rate 3447.2308 states/second
+pan: avg transition delay 1.5577e-06 usec
+cp .input.spin urcu_progress_writer_error.spin.input
+cp .input.spin.trail urcu_progress_writer_error.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-min-progress'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.spin.input b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.spin.input
new file mode 100644 (file)
index 0000000..035bdf7
--- /dev/null
@@ -0,0 +1,1158 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON)
+
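+/*
+ * Illustrative note (not part of the verified model): read_poison is the
+ * proposition over which the memory-reclamation property is expressed. The
+ * build steps in the logs feed the negated formula from the corresponding
+ * .ltl file to "spin -f" to generate the never claim; for the free-safety
+ * runs, a property of the shape
+ *
+ *	[] (!read_poison)
+ *
+ * ("the reader never observes freed, i.e. poisoned, data") is the kind of
+ * property being checked. The exact formula lives in the .ltl files and is
+ * not reproduced here.
+ */
+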
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
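+/*
+ * Minimal usage sketch of the token macros above (illustration only: EX_LOAD,
+ * EX_STORE, ex_state and tmp are hypothetical names that do not appear in the
+ * model). Each instruction is written as a guarded alternative which fires
+ * once its input tokens are present and its own output token is not yet set:
+ *
+ *	#define EX_LOAD		(1 << 0)
+ *	#define EX_STORE	(1 << 1)
+ *
+ *	int ex_state = 0;
+ *	byte tmp;
+ *
+ *	if
+ *	:: CONSUME_TOKENS(ex_state, 0, EX_LOAD) ->
+ *		tmp = READ_CACHED_VAR(urcu_gp_ctr);
+ *		PRODUCE_TOKENS(ex_state, EX_LOAD);
+ *	:: CONSUME_TOKENS(ex_state, EX_LOAD, EX_STORE) ->	// RAW on tmp
+ *		WRITE_CACHED_VAR(urcu_gp_ctr, tmp + 1);
+ *		PRODUCE_TOKENS(ex_state, EX_STORE);
+ *	fi;
+ *
+ * Wrapped in a do/od loop (as the reader and writer below do), the
+ * alternatives may be attempted in any order, but the store only becomes
+ * executable after the load has produced its token, which encodes the RAW
+ * dependency. The real procedures seed an initial token (READ_PROD_NONE,
+ * WRITE_PROD_NONE) rather than using an empty consume set.
+ */
+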
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it can still be required when
+ * writing multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
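+/*
+ * Illustration of the cache model above (not part of the verified model;
+ * "foo" is a hypothetical variable). A write first lands in the writing
+ * process's cache and only becomes visible to other processes once the dirty
+ * line is flushed, either explicitly by a barrier or at a random point by
+ * ooo_mem() below:
+ *
+ *	DECLARE_CACHED_VAR(byte, foo);		// global mem_foo
+ *	DECLARE_PROC_CACHED_VAR(byte, foo);	// per-proc cached_foo + dirty bit
+ *
+ *	WRITE_CACHED_VAR(foo, 1);		// cached_foo = 1, cache_dirty_foo = 1
+ *	// ... mem_foo still holds the old value; the store is not yet visible ...
+ *	CACHE_WRITE_TO_MEM(foo, get_pid());	// mem_foo = 1, dirty bit cleared
+ *
+ * smp_wmb()/smp_rmb()/smp_mb() below flush or fetch every modelled variable,
+ * while the RANDOM_CACHE_*() variants let the verifier explore both the
+ * flushed and the non-flushed interleavings.
+ */
+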
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Instead, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * while waiting for the reader and sending barrier requests, with
+                * the reader always servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
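+/*
+ * Sketch of the handshake modelled above (illustration only): the writer
+ * raises reader_barrier[i] and busy-waits; the reader, when it reaches the
+ * alternative currently listening for signals, issues smp_mb() and clears
+ * the flag, which releases the writer:
+ *
+ *	writer (smp_mb_send)		reader (smp_mb_recv)
+ *	--------------------		--------------------
+ *	reader_barrier[0] = 1;
+ *	(busy-wait on the flag)		smp_mb(i);
+ *					reader_barrier[0] = 0;
+ *	(flag cleared, move on)
+ *
+ * The reader is also allowed to ignore the request entirely; as the comments
+ * above explain, the progress labels in smp_mb_recv() and PROGRESS_LABEL() in
+ * smp_mb_send() are placed so that the resulting busy-wait cycles are
+ * deliberately ignored by the progress verification.
+ */
+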
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
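+/*
+ * Illustration (not part of the verified model; "a" and "b" are hypothetical
+ * variables): with ooo_mem() between two cached writes, the verifier may
+ * commit them to memory in either order, which is how store reordering is
+ * modelled:
+ *
+ *	WRITE_CACHED_VAR(a, 1);
+ *	ooo_mem(i);		// may or may not flush a
+ *	WRITE_CACHED_VAR(b, 1);
+ *	ooo_mem(i);		// may flush b while a is still only in cache
+ *
+ * Only the smp_wmb()/smp_rmb()/smp_mb() primitives above force a full flush
+ * or fetch and thereby restore ordering. With HAVE_OOO_CACHE_READ (Alpha),
+ * ooo_mem() additionally performs random cache reads, modelling the
+ * out-of-order cache-bank loads mentioned earlier.
+ */
+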
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
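+/*
+ * For orientation only (not part of the model): the token-based reader body
+ * below corresponds roughly to this sequential pseudo-code, before
+ * instruction scheduling and cache effects are applied:
+ *
+ *	rcu_read_lock();	// PROCEDURE_READ_LOCK: nest count / snapshot gp_ctr
+ *	smp_mb();		// READ_PROC_FIRST_MB
+ *	ptr = rcu_ptr;		// READ_PROC_READ_GEN
+ *	smp_read_barrier_depends();
+ *	data = rcu_data[ptr];	// READ_PROC_ACCESS_GEN
+ *	smp_mb();		// READ_PROC_SECOND_MB
+ *	rcu_read_unlock();	// PROCEDURE_READ_UNLOCK: decrement nest count
+ *
+ * The do/od loop plus the token encoding let the verifier reorder these
+ * steps in every way the dependency rules permit.
+ */
+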
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * A signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_OUT)
+                               || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE
+                                               | READ_LOCK_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN
+                                               | READ_UNLOCK_OUT, 0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT,              /* post-dominant */
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT,          /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution onto the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
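+/*
+ * For orientation only (not part of the model): the token-based writer body
+ * below corresponds roughly to this sequential pseudo-code, before
+ * instruction scheduling and cache effects are applied:
+ *
+ *	rcu_data[new] = WINE;			// WRITE_DATA
+ *	smp_wmb();				// WRITE_PROC_WMB
+ *	old = rcu_xchg_pointer(&rcu_ptr, new);	// WRITE_XCHG_PTR
+ *	smp_mb();				// WRITE_PROC_FIRST_MB
+ *	flip urcu_gp_ctr parity; wait for reader 0;	// first flip + wait
+ *	flip urcu_gp_ctr parity; wait for reader 0;	// second flip + wait
+ *	smp_mb();				// WRITE_PROC_SECOND_MB
+ *	rcu_data[old] = POISON;			// WRITE_FREE
+ *
+ * The do/od loop plus the token encoding let the verifier reorder these
+ * steps in every way the dependency rules permit.
+ */
+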
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add nonexistent dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill over into the next loop execution. Given that the
+                * validation checks whether the data entry read is poisoned,
+                * it is ok if we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with progress here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       data_read_first[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi-progress-minimal/urcu_progress_writer_error.spin.input.trail
new file mode 100644 (file)
index 0000000..a8b0188
--- /dev/null
@@ -0,0 +1,3959 @@
+-2:3:-2
+-4:-4:-4
+1:0:2739
+2:2:1136
+3:2:1141
+4:2:1145
+5:2:1153
+6:2:1157
+7:2:1161
+8:0:2739
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:2739
+16:3:2711
+17:3:2714
+18:3:2719
+19:3:2726
+20:3:2729
+21:3:2733
+22:3:2734
+23:0:2739
+24:3:2736
+25:0:2739
+26:2:1165
+27:0:2739
+28:2:1171
+29:0:2739
+30:2:1172
+31:0:2739
+32:2:1173
+33:0:2739
+34:2:1174
+35:0:2739
+36:2:1175
+37:0:2739
+38:2:1176
+39:2:1177
+40:2:1181
+41:2:1182
+42:2:1190
+43:2:1191
+44:2:1195
+45:2:1196
+46:2:1204
+47:2:1209
+48:2:1213
+49:2:1214
+50:2:1222
+51:2:1223
+52:2:1227
+53:2:1228
+54:2:1222
+55:2:1223
+56:2:1227
+57:2:1228
+58:2:1236
+59:2:1241
+60:2:1242
+61:2:1253
+62:2:1254
+63:2:1255
+64:2:1266
+65:2:1271
+66:2:1272
+67:2:1283
+68:2:1284
+69:2:1285
+70:2:1283
+71:2:1284
+72:2:1285
+73:2:1296
+74:2:1304
+75:0:2739
+76:2:1175
+77:0:2739
+78:2:1308
+79:2:1312
+80:2:1313
+81:2:1317
+82:2:1321
+83:2:1322
+84:2:1326
+85:2:1334
+86:2:1335
+87:2:1339
+88:2:1343
+89:2:1344
+90:2:1339
+91:2:1340
+92:2:1348
+93:0:2739
+94:2:1175
+95:0:2739
+96:2:1356
+97:2:1357
+98:2:1358
+99:0:2739
+100:2:1175
+101:0:2739
+102:2:1363
+103:0:2739
+104:2:2316
+105:2:2317
+106:2:2321
+107:2:2325
+108:2:2326
+109:2:2330
+110:2:2335
+111:2:2343
+112:2:2347
+113:2:2348
+114:2:2343
+115:2:2347
+116:2:2348
+117:2:2352
+118:2:2359
+119:2:2366
+120:2:2367
+121:2:2374
+122:2:2379
+123:2:2386
+124:2:2387
+125:2:2386
+126:2:2387
+127:2:2394
+128:2:2398
+129:0:2739
+130:2:2403
+131:0:2739
+132:2:2404
+133:0:2739
+134:2:2405
+135:0:2739
+136:2:2406
+137:0:2739
+138:1:29
+139:0:2739
+140:1:35
+141:0:2739
+142:1:36
+143:0:2739
+144:2:2407
+145:0:2739
+146:1:37
+147:0:2739
+148:2:2406
+149:0:2739
+150:1:38
+151:0:2739
+152:2:2407
+153:0:2739
+154:1:39
+155:0:2739
+156:1:40
+157:0:2739
+158:1:41
+159:0:2739
+160:2:2406
+161:0:2739
+162:1:42
+163:0:2739
+164:2:2407
+165:0:2739
+166:1:51
+167:0:2739
+168:2:2406
+169:0:2739
+170:1:55
+171:1:56
+172:1:60
+173:1:64
+174:1:65
+175:1:69
+176:1:77
+177:1:78
+178:1:82
+179:1:86
+180:1:87
+181:1:82
+182:1:86
+183:1:87
+184:1:91
+185:1:98
+186:1:105
+187:1:106
+188:1:113
+189:1:118
+190:1:125
+191:1:126
+192:1:125
+193:1:126
+194:1:133
+195:1:137
+196:0:2739
+197:2:2407
+198:0:2739
+199:1:142
+200:0:2739
+201:2:2408
+202:0:2739
+203:2:2413
+204:0:2739
+205:2:2414
+206:0:2739
+207:2:2422
+208:2:2423
+209:2:2427
+210:2:2431
+211:2:2432
+212:2:2436
+213:2:2444
+214:2:2445
+215:2:2449
+216:2:2453
+217:2:2454
+218:2:2449
+219:2:2453
+220:2:2454
+221:2:2458
+222:2:2465
+223:2:2472
+224:2:2473
+225:2:2480
+226:2:2485
+227:2:2492
+228:2:2493
+229:2:2492
+230:2:2493
+231:2:2500
+232:2:2504
+233:0:2739
+234:2:1365
+235:2:2297
+236:0:2739
+237:2:1175
+238:0:2739
+239:2:1366
+240:0:2739
+241:2:1175
+242:0:2739
+243:2:1369
+244:2:1370
+245:2:1374
+246:2:1375
+247:2:1383
+248:2:1384
+249:2:1388
+250:2:1389
+251:2:1397
+252:2:1402
+253:2:1406
+254:2:1407
+255:2:1415
+256:2:1416
+257:2:1420
+258:2:1421
+259:2:1415
+260:2:1416
+261:2:1420
+262:2:1421
+263:2:1429
+264:2:1434
+265:2:1435
+266:2:1446
+267:2:1447
+268:2:1448
+269:2:1459
+270:2:1464
+271:2:1465
+272:2:1476
+273:2:1477
+274:2:1478
+275:2:1476
+276:2:1477
+277:2:1478
+278:2:1489
+279:2:1496
+280:0:2739
+281:2:1175
+282:0:2739
+283:2:1500
+284:2:1501
+285:2:1502
+286:2:1514
+287:2:1515
+288:2:1519
+289:2:1520
+290:2:1528
+291:2:1533
+292:2:1537
+293:2:1538
+294:2:1546
+295:2:1547
+296:2:1551
+297:2:1552
+298:2:1546
+299:2:1547
+300:2:1551
+301:2:1552
+302:2:1560
+303:2:1565
+304:2:1566
+305:2:1577
+306:2:1578
+307:2:1579
+308:2:1590
+309:2:1595
+310:2:1596
+311:2:1607
+312:2:1608
+313:2:1609
+314:2:1607
+315:2:1608
+316:2:1609
+317:2:1620
+318:2:1631
+319:2:1632
+320:0:2739
+321:2:1175
+322:0:2739
+323:2:1763
+324:2:1764
+325:2:1768
+326:2:1769
+327:2:1777
+328:2:1778
+329:2:1782
+330:2:1783
+331:2:1791
+332:2:1796
+333:2:1800
+334:2:1801
+335:2:1809
+336:2:1810
+337:2:1814
+338:2:1815
+339:2:1809
+340:2:1810
+341:2:1814
+342:2:1815
+343:2:1823
+344:2:1828
+345:2:1829
+346:2:1840
+347:2:1841
+348:2:1842
+349:2:1853
+350:2:1858
+351:2:1859
+352:2:1870
+353:2:1871
+354:2:1872
+355:2:1870
+356:2:1871
+357:2:1872
+358:2:1883
+359:0:2739
+360:2:1175
+361:0:2739
+362:2:1892
+363:2:1893
+364:2:1897
+365:2:1898
+366:2:1906
+367:2:1907
+368:2:1911
+369:2:1912
+370:2:1920
+371:2:1925
+372:2:1929
+373:2:1930
+374:2:1938
+375:2:1939
+376:2:1943
+377:2:1944
+378:2:1938
+379:2:1939
+380:2:1943
+381:2:1944
+382:2:1952
+383:2:1957
+384:2:1958
+385:2:1969
+386:2:1970
+387:2:1971
+388:2:1982
+389:2:1987
+390:2:1988
+391:2:1999
+392:2:2000
+393:2:2001
+394:2:1999
+395:2:2000
+396:2:2001
+397:2:2012
+398:2:2019
+399:0:2739
+400:2:1175
+401:0:2739
+402:2:2023
+403:2:2024
+404:2:2025
+405:2:2037
+406:2:2038
+407:2:2042
+408:2:2043
+409:2:2051
+410:2:2056
+411:2:2060
+412:2:2061
+413:2:2069
+414:2:2070
+415:2:2074
+416:2:2075
+417:2:2069
+418:2:2070
+419:2:2074
+420:2:2075
+421:2:2083
+422:2:2088
+423:2:2089
+424:2:2100
+425:2:2101
+426:2:2102
+427:2:2113
+428:2:2118
+429:2:2119
+430:2:2130
+431:2:2131
+432:2:2132
+433:2:2130
+434:2:2131
+435:2:2132
+436:2:2143
+437:2:2153
+438:2:2154
+439:0:2739
+440:2:1175
+441:0:2739
+442:2:2285
+443:0:2739
+444:2:2514
+445:2:2515
+446:2:2519
+447:2:2523
+448:2:2524
+449:2:2528
+450:2:2536
+451:2:2537
+452:2:2541
+453:2:2545
+454:2:2546
+455:2:2541
+456:2:2545
+457:2:2546
+458:2:2550
+459:2:2557
+460:2:2564
+461:2:2565
+462:2:2572
+463:2:2577
+464:2:2584
+465:2:2585
+466:2:2584
+467:2:2585
+468:2:2592
+469:2:2596
+470:0:2739
+471:2:2601
+472:0:2739
+473:2:2602
+474:0:2739
+475:2:2603
+476:0:2739
+477:2:2604
+478:0:2739
+479:1:51
+480:0:2739
+481:2:2605
+482:0:2739
+483:1:55
+484:1:56
+485:1:60
+486:1:64
+487:1:65
+488:1:69
+489:1:77
+490:1:78
+491:1:82
+492:1:86
+493:1:87
+494:1:82
+495:1:86
+496:1:87
+497:1:91
+498:1:98
+499:1:105
+500:1:106
+501:1:113
+502:1:118
+503:1:125
+504:1:126
+505:1:125
+506:1:126
+507:1:133
+508:1:137
+509:0:2739
+510:2:2604
+511:0:2739
+512:1:142
+513:0:2739
+514:2:2605
+515:0:2739
+516:2:2606
+517:0:2739
+518:2:2611
+519:0:2739
+520:2:2612
+521:0:2739
+522:2:2620
+523:2:2621
+524:2:2625
+525:2:2629
+526:2:2630
+527:2:2634
+528:2:2642
+529:2:2643
+530:2:2647
+531:2:2651
+532:2:2652
+533:2:2647
+534:2:2651
+535:2:2652
+536:2:2656
+537:2:2663
+538:2:2670
+539:2:2671
+540:2:2678
+541:2:2683
+542:2:2690
+543:2:2691
+544:2:2690
+545:2:2691
+546:2:2698
+547:2:2702
+548:0:2739
+549:2:2287
+550:2:2297
+551:0:2739
+552:2:1175
+553:0:2739
+554:2:2288
+555:2:2289
+556:0:2739
+557:2:1175
+558:0:2739
+559:2:2293
+560:0:2739
+561:2:2301
+562:0:2739
+563:2:1172
+564:0:2739
+565:2:1173
+566:0:2739
+567:2:1174
+568:0:2739
+569:2:1175
+570:0:2739
+571:2:1176
+572:2:1177
+573:2:1181
+574:2:1182
+575:2:1190
+576:2:1191
+577:2:1195
+578:2:1196
+579:2:1204
+580:2:1209
+581:2:1213
+582:2:1214
+583:2:1222
+584:2:1223
+585:2:1224
+586:2:1222
+587:2:1223
+588:2:1227
+589:2:1228
+590:2:1236
+591:2:1241
+592:2:1242
+593:2:1253
+594:2:1254
+595:2:1255
+596:2:1266
+597:2:1271
+598:2:1272
+599:2:1283
+600:2:1284
+601:2:1285
+602:2:1283
+603:2:1284
+604:2:1285
+605:2:1296
+606:2:1304
+607:0:2739
+608:2:1175
+609:0:2739
+610:2:1308
+611:2:1312
+612:2:1313
+613:2:1317
+614:2:1321
+615:2:1322
+616:2:1326
+617:2:1334
+618:2:1335
+619:2:1339
+620:2:1340
+621:2:1339
+622:2:1343
+623:2:1344
+624:2:1348
+625:0:2739
+626:2:1175
+627:0:2739
+628:2:1356
+629:2:1357
+630:2:1358
+631:0:2739
+632:2:1175
+633:0:2739
+634:2:1363
+635:0:2739
+636:2:2316
+637:2:2317
+638:2:2321
+639:2:2325
+640:2:2326
+641:2:2330
+642:2:2335
+643:2:2343
+644:2:2347
+645:2:2348
+646:2:2343
+647:2:2347
+648:2:2348
+649:2:2352
+650:2:2359
+651:2:2366
+652:2:2367
+653:2:2374
+654:2:2379
+655:2:2386
+656:2:2387
+657:2:2386
+658:2:2387
+659:2:2394
+660:2:2398
+661:0:2739
+662:2:2403
+663:0:2739
+664:2:2404
+665:0:2739
+666:2:2405
+667:0:2739
+668:2:2406
+669:0:2739
+670:1:51
+671:0:2739
+672:2:2407
+673:0:2739
+674:1:55
+675:1:56
+676:1:60
+677:1:64
+678:1:65
+679:1:69
+680:1:77
+681:1:78
+682:1:82
+683:1:86
+684:1:87
+685:1:82
+686:1:86
+687:1:87
+688:1:91
+689:1:98
+690:1:105
+691:1:106
+692:1:113
+693:1:118
+694:1:125
+695:1:126
+696:1:125
+697:1:126
+698:1:133
+699:1:137
+700:0:2739
+701:2:2406
+702:0:2739
+703:1:142
+704:0:2739
+705:2:2407
+706:0:2739
+707:2:2408
+708:0:2739
+709:2:2413
+710:0:2739
+711:2:2414
+712:0:2739
+713:2:2422
+714:2:2423
+715:2:2427
+716:2:2431
+717:2:2432
+718:2:2436
+719:2:2444
+720:2:2445
+721:2:2449
+722:2:2453
+723:2:2454
+724:2:2449
+725:2:2453
+726:2:2454
+727:2:2458
+728:2:2465
+729:2:2472
+730:2:2473
+731:2:2480
+732:2:2485
+733:2:2492
+734:2:2493
+735:2:2492
+736:2:2493
+737:2:2500
+738:2:2504
+739:0:2739
+740:2:1365
+741:2:2297
+742:0:2739
+743:2:1175
+744:0:2739
+745:2:1366
+746:0:2739
+747:2:1175
+748:0:2739
+749:2:1369
+750:2:1370
+751:2:1374
+752:2:1375
+753:2:1383
+754:2:1384
+755:2:1388
+756:2:1389
+757:2:1397
+758:2:1402
+759:2:1406
+760:2:1407
+761:2:1415
+762:2:1416
+763:2:1420
+764:2:1421
+765:2:1415
+766:2:1416
+767:2:1420
+768:2:1421
+769:2:1429
+770:2:1434
+771:2:1435
+772:2:1446
+773:2:1447
+774:2:1448
+775:2:1459
+776:2:1464
+777:2:1465
+778:2:1476
+779:2:1477
+780:2:1478
+781:2:1476
+782:2:1477
+783:2:1478
+784:2:1489
+785:2:1496
+786:0:2739
+787:2:1175
+788:0:2739
+789:2:1500
+790:2:1501
+791:2:1502
+792:2:1514
+793:2:1515
+794:2:1519
+795:2:1520
+796:2:1528
+797:2:1533
+798:2:1537
+799:2:1538
+800:2:1546
+801:2:1547
+802:2:1551
+803:2:1552
+804:2:1546
+805:2:1547
+806:2:1551
+807:2:1552
+808:2:1560
+809:2:1565
+810:2:1566
+811:2:1577
+812:2:1578
+813:2:1579
+814:2:1590
+815:2:1595
+816:2:1596
+817:2:1607
+818:2:1608
+819:2:1609
+820:2:1607
+821:2:1608
+822:2:1609
+823:2:1620
+824:2:1631
+825:2:1632
+826:0:2739
+827:2:1175
+828:0:2739
+829:2:1763
+830:2:1764
+831:2:1768
+832:2:1769
+833:2:1777
+834:2:1778
+835:2:1782
+836:2:1783
+837:2:1791
+838:2:1796
+839:2:1800
+840:2:1801
+841:2:1809
+842:2:1810
+843:2:1814
+844:2:1815
+845:2:1809
+846:2:1810
+847:2:1814
+848:2:1815
+849:2:1823
+850:2:1828
+851:2:1829
+852:2:1840
+853:2:1841
+854:2:1842
+855:2:1853
+856:2:1858
+857:2:1859
+858:2:1870
+859:2:1871
+860:2:1872
+861:2:1870
+862:2:1871
+863:2:1872
+864:2:1883
+865:0:2739
+866:2:1175
+867:0:2739
+868:2:1892
+869:2:1893
+870:2:1897
+871:2:1898
+872:2:1906
+873:2:1907
+874:2:1911
+875:2:1912
+876:2:1920
+877:2:1925
+878:2:1929
+879:2:1930
+880:2:1938
+881:2:1939
+882:2:1943
+883:2:1944
+884:2:1938
+885:2:1939
+886:2:1943
+887:2:1944
+888:2:1952
+889:2:1957
+890:2:1958
+891:2:1969
+892:2:1970
+893:2:1971
+894:2:1982
+895:2:1987
+896:2:1988
+897:2:1999
+898:2:2000
+899:2:2001
+900:2:1999
+901:2:2000
+902:2:2001
+903:2:2012
+904:2:2019
+905:0:2739
+906:2:1175
+907:0:2739
+908:2:2023
+909:2:2024
+910:2:2025
+911:2:2037
+912:2:2038
+913:2:2042
+914:2:2043
+915:2:2051
+916:2:2056
+917:2:2060
+918:2:2061
+919:2:2069
+920:2:2070
+921:2:2074
+922:2:2075
+923:2:2069
+924:2:2070
+925:2:2074
+926:2:2075
+927:2:2083
+928:2:2088
+929:2:2089
+930:2:2100
+931:2:2101
+932:2:2102
+933:2:2113
+934:2:2118
+935:2:2119
+936:2:2130
+937:2:2131
+938:2:2132
+939:2:2130
+940:2:2131
+941:2:2132
+942:2:2143
+943:2:2153
+944:2:2154
+945:0:2739
+946:2:1175
+947:0:2739
+948:2:2285
+949:0:2739
+950:2:2514
+951:2:2515
+952:2:2519
+953:2:2523
+954:2:2524
+955:2:2528
+956:2:2536
+957:2:2537
+958:2:2541
+959:2:2545
+960:2:2546
+961:2:2541
+962:2:2545
+963:2:2546
+964:2:2550
+965:2:2557
+966:2:2564
+967:2:2565
+968:2:2572
+969:2:2577
+970:2:2584
+971:2:2585
+972:2:2584
+973:2:2585
+974:2:2592
+975:2:2596
+976:0:2739
+977:2:2601
+978:0:2739
+979:2:2602
+980:0:2739
+981:2:2603
+982:0:2739
+983:2:2604
+984:0:2739
+985:1:51
+986:0:2739
+987:2:2605
+988:0:2739
+989:1:55
+990:1:56
+991:1:60
+992:1:64
+993:1:65
+994:1:69
+995:1:77
+996:1:78
+997:1:82
+998:1:86
+999:1:87
+1000:1:82
+1001:1:86
+1002:1:87
+1003:1:91
+1004:1:98
+1005:1:105
+1006:1:106
+1007:1:113
+1008:1:118
+1009:1:125
+1010:1:126
+1011:1:125
+1012:1:126
+1013:1:133
+1014:1:137
+1015:0:2739
+1016:2:2604
+1017:0:2739
+1018:1:142
+1019:0:2739
+1020:2:2605
+1021:0:2739
+1022:2:2606
+1023:0:2739
+1024:2:2611
+1025:0:2739
+1026:2:2612
+1027:0:2739
+1028:2:2620
+1029:2:2621
+1030:2:2625
+1031:2:2629
+1032:2:2630
+1033:2:2634
+1034:2:2642
+1035:2:2643
+1036:2:2647
+1037:2:2651
+1038:2:2652
+1039:2:2647
+1040:2:2651
+1041:2:2652
+1042:2:2656
+1043:2:2663
+1044:2:2670
+1045:2:2671
+1046:2:2678
+1047:2:2683
+1048:2:2690
+1049:2:2691
+1050:2:2690
+1051:2:2691
+1052:2:2698
+1053:2:2702
+1054:0:2739
+1055:2:2287
+1056:2:2297
+1057:0:2739
+1058:2:1175
+1059:0:2739
+1060:2:2288
+1061:2:2289
+1062:0:2739
+1063:2:1175
+1064:0:2739
+1065:2:2293
+1066:0:2739
+1067:2:2301
+1068:0:2739
+1069:2:1172
+1070:0:2739
+1071:2:1173
+1072:0:2739
+1073:2:1174
+1074:0:2739
+1075:2:1175
+1076:0:2739
+1077:2:1176
+1078:2:1177
+1079:2:1181
+1080:2:1182
+1081:2:1190
+1082:2:1191
+1083:2:1195
+1084:2:1196
+1085:2:1204
+1086:2:1209
+1087:2:1213
+1088:2:1214
+1089:2:1222
+1090:2:1223
+1091:2:1227
+1092:2:1228
+1093:2:1222
+1094:2:1223
+1095:2:1224
+1096:2:1236
+1097:2:1241
+1098:2:1242
+1099:2:1253
+1100:2:1254
+1101:2:1255
+1102:2:1266
+1103:2:1271
+1104:2:1272
+1105:2:1283
+1106:2:1284
+1107:2:1285
+1108:2:1283
+1109:2:1284
+1110:2:1285
+1111:2:1296
+1112:2:1304
+1113:0:2739
+1114:2:1175
+1115:0:2739
+1116:2:1308
+1117:2:1312
+1118:2:1313
+1119:2:1317
+1120:2:1321
+1121:2:1322
+1122:2:1326
+1123:2:1334
+1124:2:1335
+1125:2:1339
+1126:2:1343
+1127:2:1344
+1128:2:1339
+1129:2:1340
+1130:2:1348
+1131:0:2739
+1132:2:1175
+1133:0:2739
+1134:2:1356
+1135:2:1357
+1136:2:1358
+1137:0:2739
+1138:2:1175
+1139:0:2739
+1140:2:1363
+1141:0:2739
+1142:2:2316
+1143:2:2317
+1144:2:2321
+1145:2:2325
+1146:2:2326
+1147:2:2330
+1148:2:2335
+1149:2:2343
+1150:2:2347
+1151:2:2348
+1152:2:2343
+1153:2:2347
+1154:2:2348
+1155:2:2352
+1156:2:2359
+1157:2:2366
+1158:2:2367
+1159:2:2374
+1160:2:2379
+1161:2:2386
+1162:2:2387
+1163:2:2386
+1164:2:2387
+1165:2:2394
+1166:2:2398
+1167:0:2739
+1168:2:2403
+1169:0:2739
+1170:2:2404
+1171:0:2739
+1172:2:2405
+1173:0:2739
+1174:2:2406
+1175:0:2739
+1176:1:51
+1177:0:2739
+1178:2:2407
+1179:0:2739
+1180:1:55
+1181:1:56
+1182:1:60
+1183:1:64
+1184:1:65
+1185:1:69
+1186:1:77
+1187:1:78
+1188:1:82
+1189:1:86
+1190:1:87
+1191:1:82
+1192:1:86
+1193:1:87
+1194:1:91
+1195:1:98
+1196:1:105
+1197:1:106
+1198:1:113
+1199:1:118
+1200:1:125
+1201:1:126
+1202:1:125
+1203:1:126
+1204:1:133
+1205:1:137
+1206:0:2739
+1207:2:2406
+1208:0:2739
+1209:1:142
+1210:0:2739
+1211:2:2407
+1212:0:2739
+1213:2:2408
+1214:0:2739
+1215:2:2413
+1216:0:2739
+1217:2:2414
+1218:0:2739
+1219:2:2422
+1220:2:2423
+1221:2:2427
+1222:2:2431
+1223:2:2432
+1224:2:2436
+1225:2:2444
+1226:2:2445
+1227:2:2449
+1228:2:2453
+1229:2:2454
+1230:2:2449
+1231:2:2453
+1232:2:2454
+1233:2:2458
+1234:2:2465
+1235:2:2472
+1236:2:2473
+1237:2:2480
+1238:2:2485
+1239:2:2492
+1240:2:2493
+1241:2:2492
+1242:2:2493
+1243:2:2500
+1244:2:2504
+1245:0:2739
+1246:2:1365
+1247:2:2297
+1248:0:2739
+1249:2:1175
+1250:0:2739
+1251:2:1366
+1252:0:2739
+1253:2:1175
+1254:0:2739
+1255:2:1369
+1256:2:1370
+1257:2:1374
+1258:2:1375
+1259:2:1383
+1260:2:1384
+1261:2:1388
+1262:2:1389
+1263:2:1397
+1264:2:1402
+1265:2:1406
+1266:2:1407
+1267:2:1415
+1268:2:1416
+1269:2:1420
+1270:2:1421
+1271:2:1415
+1272:2:1416
+1273:2:1420
+1274:2:1421
+1275:2:1429
+1276:2:1434
+1277:2:1435
+1278:2:1446
+1279:2:1447
+1280:2:1448
+1281:2:1459
+1282:2:1464
+1283:2:1465
+1284:2:1476
+1285:2:1477
+1286:2:1478
+1287:2:1476
+1288:2:1477
+1289:2:1478
+1290:2:1489
+1291:2:1496
+1292:0:2739
+1293:2:1175
+1294:0:2739
+1295:2:1500
+1296:2:1501
+1297:2:1502
+1298:2:1514
+1299:2:1515
+1300:2:1519
+1301:2:1520
+1302:2:1528
+1303:2:1533
+1304:2:1537
+1305:2:1538
+1306:2:1546
+1307:2:1547
+1308:2:1551
+1309:2:1552
+1310:2:1546
+1311:2:1547
+1312:2:1551
+1313:2:1552
+1314:2:1560
+1315:2:1565
+1316:2:1566
+1317:2:1577
+1318:2:1578
+1319:2:1579
+1320:2:1590
+1321:2:1595
+1322:2:1596
+1323:2:1607
+1324:2:1608
+1325:2:1609
+1326:2:1607
+1327:2:1608
+1328:2:1609
+1329:2:1620
+1330:2:1631
+1331:2:1632
+1332:0:2739
+1333:2:1175
+1334:0:2739
+1335:2:1763
+1336:2:1764
+1337:2:1768
+1338:2:1769
+1339:2:1777
+1340:2:1778
+1341:2:1782
+1342:2:1783
+1343:2:1791
+1344:2:1796
+1345:2:1800
+1346:2:1801
+1347:2:1809
+1348:2:1810
+1349:2:1814
+1350:2:1815
+1351:2:1809
+1352:2:1810
+1353:2:1814
+1354:2:1815
+1355:2:1823
+1356:2:1828
+1357:2:1829
+1358:2:1840
+1359:2:1841
+1360:2:1842
+1361:2:1853
+1362:2:1858
+1363:2:1859
+1364:2:1870
+1365:2:1871
+1366:2:1872
+1367:2:1870
+1368:2:1871
+1369:2:1872
+1370:2:1883
+1371:0:2739
+1372:2:1175
+1373:0:2739
+1374:2:1892
+1375:2:1893
+1376:2:1897
+1377:2:1898
+1378:2:1906
+1379:2:1907
+1380:2:1911
+1381:2:1912
+1382:2:1920
+1383:2:1925
+1384:2:1929
+1385:2:1930
+1386:2:1938
+1387:2:1939
+1388:2:1943
+1389:2:1944
+1390:2:1938
+1391:2:1939
+1392:2:1943
+1393:2:1944
+1394:2:1952
+1395:2:1957
+1396:2:1958
+1397:2:1969
+1398:2:1970
+1399:2:1971
+1400:2:1982
+1401:2:1987
+1402:2:1988
+1403:2:1999
+1404:2:2000
+1405:2:2001
+1406:2:1999
+1407:2:2000
+1408:2:2001
+1409:2:2012
+1410:2:2019
+1411:0:2739
+1412:2:1175
+1413:0:2739
+1414:1:143
+1415:0:2739
+1416:1:145
+1417:0:2739
+1418:1:44
+1419:0:2739
+1420:1:151
+1421:1:152
+1422:1:156
+1423:1:157
+1424:1:165
+1425:1:166
+1426:1:170
+1427:1:171
+1428:1:179
+1429:1:184
+1430:1:188
+1431:1:189
+1432:1:197
+1433:1:198
+1434:1:202
+1435:1:203
+1436:1:197
+1437:1:198
+1438:1:202
+1439:1:203
+1440:1:211
+1441:1:216
+1442:1:217
+1443:1:228
+1444:1:229
+1445:1:230
+1446:1:241
+1447:1:246
+1448:1:247
+1449:1:258
+1450:1:259
+1451:1:260
+1452:1:258
+1453:1:259
+1454:1:260
+1455:1:271
+1456:0:2739
+1457:1:40
+1458:0:2739
+1459:1:41
+1460:0:2739
+1461:1:42
+1462:0:2739
+1463:1:143
+1464:0:2739
+1465:1:145
+1466:0:2739
+1467:1:44
+1468:0:2739
+1469:1:280
+1470:1:281
+1471:0:2739
+1472:1:40
+1473:0:2739
+1474:1:41
+1475:0:2739
+1476:1:42
+1477:0:2739
+1478:1:143
+1479:0:2739
+1480:1:145
+1481:0:2739
+1482:1:44
+1483:0:2739
+1484:1:287
+1485:1:288
+1486:1:292
+1487:1:293
+1488:1:301
+1489:1:302
+1490:1:306
+1491:1:307
+1492:1:315
+1493:1:320
+1494:1:324
+1495:1:325
+1496:1:333
+1497:1:334
+1498:1:338
+1499:1:339
+1500:1:333
+1501:1:334
+1502:1:338
+1503:1:339
+1504:1:347
+1505:1:352
+1506:1:353
+1507:1:364
+1508:1:365
+1509:1:366
+1510:1:377
+1511:1:382
+1512:1:383
+1513:1:394
+1514:1:395
+1515:1:396
+1516:1:394
+1517:1:395
+1518:1:396
+1519:1:407
+1520:0:2739
+1521:1:40
+1522:0:2739
+1523:1:41
+1524:0:2739
+1525:1:42
+1526:0:2739
+1527:1:143
+1528:0:2739
+1529:1:145
+1530:0:2739
+1531:1:44
+1532:0:2739
+1533:1:416
+1534:1:417
+1535:1:421
+1536:1:422
+1537:1:430
+1538:1:431
+1539:1:435
+1540:1:436
+1541:1:444
+1542:1:449
+1543:1:453
+1544:1:454
+1545:1:462
+1546:1:463
+1547:1:467
+1548:1:468
+1549:1:462
+1550:1:463
+1551:1:467
+1552:1:468
+1553:1:476
+1554:1:481
+1555:1:482
+1556:1:493
+1557:1:494
+1558:1:495
+1559:1:506
+1560:1:511
+1561:1:512
+1562:1:523
+1563:1:524
+1564:1:525
+1565:1:523
+1566:1:524
+1567:1:525
+1568:1:536
+1569:1:543
+1570:0:2739
+1571:1:40
+1572:0:2739
+1573:1:41
+1574:0:2739
+1575:1:42
+1576:0:2739
+1577:1:143
+1578:0:2739
+1579:1:145
+1580:0:2739
+1581:1:44
+1582:0:2739
+1583:1:681
+1584:1:682
+1585:1:686
+1586:1:687
+1587:1:695
+1588:1:696
+1589:1:697
+1590:1:709
+1591:1:714
+1592:1:718
+1593:1:719
+1594:1:727
+1595:1:728
+1596:1:732
+1597:1:733
+1598:1:727
+1599:1:728
+1600:1:732
+1601:1:733
+1602:1:741
+1603:1:746
+1604:1:747
+1605:1:758
+1606:1:759
+1607:1:760
+1608:1:771
+1609:1:776
+1610:1:777
+1611:1:788
+1612:1:789
+1613:1:790
+1614:1:788
+1615:1:789
+1616:1:790
+1617:1:801
+1618:0:2739
+1619:1:40
+1620:0:2739
+1621:1:41
+1622:0:2739
+1623:2:2023
+1624:2:2024
+1625:2:2025
+1626:2:2037
+1627:2:2038
+1628:2:2042
+1629:2:2043
+1630:2:2051
+1631:2:2056
+1632:2:2060
+1633:2:2061
+1634:2:2069
+1635:2:2070
+1636:2:2074
+1637:2:2075
+1638:2:2069
+1639:2:2070
+1640:2:2074
+1641:2:2075
+1642:2:2083
+1643:2:2088
+1644:2:2089
+1645:2:2100
+1646:2:2101
+1647:2:2102
+1648:2:2113
+1649:2:2118
+1650:2:2119
+1651:2:2130
+1652:2:2131
+1653:2:2132
+1654:2:2130
+1655:2:2131
+1656:2:2132
+1657:2:2143
+1658:2:2151
+1659:0:2739
+1660:2:1175
+1661:0:2739
+1662:2:2157
+1663:2:2158
+1664:2:2162
+1665:2:2163
+1666:2:2171
+1667:2:2172
+1668:2:2176
+1669:2:2177
+1670:2:2185
+1671:2:2190
+1672:2:2194
+1673:2:2195
+1674:2:2203
+1675:2:2204
+1676:2:2208
+1677:2:2209
+1678:2:2203
+1679:2:2204
+1680:2:2208
+1681:2:2209
+1682:2:2217
+1683:2:2222
+1684:2:2223
+1685:2:2234
+1686:2:2235
+1687:2:2236
+1688:2:2247
+1689:2:2252
+1690:2:2253
+1691:2:2264
+1692:2:2265
+1693:2:2266
+1694:2:2264
+1695:2:2265
+1696:2:2266
+1697:2:2277
+1698:0:2739
+1699:2:1175
+1700:0:2739
+1701:1:42
+1702:0:2739
+1703:2:2023
+1704:2:2024
+1705:2:2028
+1706:2:2029
+1707:2:2037
+1708:2:2038
+1709:2:2042
+1710:2:2043
+1711:2:2051
+1712:2:2056
+1713:2:2060
+1714:2:2061
+1715:2:2069
+1716:2:2070
+1717:2:2074
+1718:2:2075
+1719:2:2069
+1720:2:2070
+1721:2:2074
+1722:2:2075
+1723:2:2083
+1724:2:2088
+1725:2:2089
+1726:2:2100
+1727:2:2101
+1728:2:2102
+1729:2:2113
+1730:2:2118
+1731:2:2119
+1732:2:2130
+1733:2:2131
+1734:2:2132
+1735:2:2130
+1736:2:2131
+1737:2:2132
+1738:2:2143
+1739:2:2151
+1740:0:2739
+1741:2:1175
+1742:0:2739
+1743:2:2157
+1744:2:2158
+1745:2:2162
+1746:2:2163
+1747:2:2171
+1748:2:2172
+1749:2:2176
+1750:2:2177
+1751:2:2185
+1752:2:2190
+1753:2:2194
+1754:2:2195
+1755:2:2203
+1756:2:2204
+1757:2:2208
+1758:2:2209
+1759:2:2203
+1760:2:2204
+1761:2:2208
+1762:2:2209
+1763:2:2217
+1764:2:2222
+1765:2:2223
+1766:2:2234
+1767:2:2235
+1768:2:2236
+1769:2:2247
+1770:2:2252
+1771:2:2253
+1772:2:2264
+1773:2:2265
+1774:2:2266
+1775:2:2264
+1776:2:2265
+1777:2:2266
+1778:2:2277
+1779:0:2739
+1780:1:143
+1781:0:2739
+1782:2:1175
+1783:0:2739
+1784:2:2023
+1785:2:2024
+1786:2:2028
+1787:2:2029
+1788:2:2037
+1789:2:2038
+1790:2:2042
+1791:2:2043
+1792:2:2051
+1793:2:2056
+1794:2:2060
+1795:2:2061
+1796:2:2069
+1797:2:2070
+1798:2:2074
+1799:2:2075
+1800:2:2069
+1801:2:2070
+1802:2:2074
+1803:2:2075
+1804:2:2083
+1805:2:2088
+1806:2:2089
+1807:2:2100
+1808:2:2101
+1809:2:2102
+1810:2:2113
+1811:2:2118
+1812:2:2119
+1813:2:2130
+1814:2:2131
+1815:2:2132
+1816:2:2130
+1817:2:2131
+1818:2:2132
+1819:2:2143
+1820:2:2151
+1821:0:2739
+1822:2:1175
+1823:0:2739
+1824:1:145
+1825:0:2739
+1826:2:2157
+1827:2:2158
+1828:2:2162
+1829:2:2163
+1830:2:2171
+1831:2:2172
+1832:2:2176
+1833:2:2177
+1834:2:2185
+1835:2:2190
+1836:2:2194
+1837:2:2195
+1838:2:2203
+1839:2:2204
+1840:2:2208
+1841:2:2209
+1842:2:2203
+1843:2:2204
+1844:2:2208
+1845:2:2209
+1846:2:2217
+1847:2:2222
+1848:2:2223
+1849:2:2234
+1850:2:2235
+1851:2:2236
+1852:2:2247
+1853:2:2252
+1854:2:2253
+1855:2:2264
+1856:2:2265
+1857:2:2266
+1858:2:2264
+1859:2:2265
+1860:2:2266
+1861:2:2277
+1862:0:2739
+1863:2:1175
+1864:0:2739
+1865:2:2023
+1866:2:2024
+1867:2:2028
+1868:2:2029
+1869:2:2037
+1870:2:2038
+1871:2:2042
+1872:2:2043
+1873:2:2051
+1874:2:2056
+1875:2:2060
+1876:2:2061
+1877:2:2069
+1878:2:2070
+1879:2:2074
+1880:2:2075
+1881:2:2069
+1882:2:2070
+1883:2:2074
+1884:2:2075
+1885:2:2083
+1886:2:2088
+1887:2:2089
+1888:2:2100
+1889:2:2101
+1890:2:2102
+1891:2:2113
+1892:2:2118
+1893:2:2119
+1894:2:2130
+1895:2:2131
+1896:2:2132
+1897:2:2130
+1898:2:2131
+1899:2:2132
+1900:2:2143
+1901:2:2151
+1902:0:2739
+1903:1:44
+1904:0:2739
+1905:2:1175
+1906:0:2739
+1907:2:2157
+1908:2:2158
+1909:2:2162
+1910:2:2163
+1911:2:2171
+1912:2:2172
+1913:2:2176
+1914:2:2177
+1915:2:2185
+1916:2:2190
+1917:2:2194
+1918:2:2195
+1919:2:2203
+1920:2:2204
+1921:2:2208
+1922:2:2209
+1923:2:2203
+1924:2:2204
+1925:2:2208
+1926:2:2209
+1927:2:2217
+1928:2:2222
+1929:2:2223
+1930:2:2234
+1931:2:2235
+1932:2:2236
+1933:2:2247
+1934:2:2252
+1935:2:2253
+1936:2:2264
+1937:2:2265
+1938:2:2266
+1939:2:2264
+1940:2:2265
+1941:2:2266
+1942:2:2277
+1943:0:2739
+1944:2:1175
+1945:0:2739
+1946:1:810
+1947:0:2739
+1948:2:2023
+1949:2:2024
+1950:2:2028
+1951:2:2029
+1952:2:2037
+1953:2:2038
+1954:2:2042
+1955:2:2043
+1956:2:2051
+1957:2:2056
+1958:2:2060
+1959:2:2061
+1960:2:2069
+1961:2:2070
+1962:2:2074
+1963:2:2075
+1964:2:2069
+1965:2:2070
+1966:2:2074
+1967:2:2075
+1968:2:2083
+1969:2:2088
+1970:2:2089
+1971:2:2100
+1972:2:2101
+1973:2:2102
+1974:2:2113
+1975:2:2118
+1976:2:2119
+1977:2:2130
+1978:2:2131
+1979:2:2132
+1980:2:2130
+1981:2:2131
+1982:2:2132
+1983:2:2143
+1984:2:2151
+1985:0:2739
+1986:2:1175
+1987:0:2739
+1988:2:2157
+1989:2:2158
+1990:2:2162
+1991:2:2163
+1992:2:2171
+1993:2:2172
+1994:2:2176
+1995:2:2177
+1996:2:2185
+1997:2:2190
+1998:2:2194
+1999:2:2195
+2000:2:2203
+2001:2:2204
+2002:2:2208
+2003:2:2209
+2004:2:2203
+2005:2:2204
+2006:2:2208
+2007:2:2209
+2008:2:2217
+2009:2:2222
+2010:2:2223
+2011:2:2234
+2012:2:2235
+2013:2:2236
+2014:2:2247
+2015:2:2252
+2016:2:2253
+2017:2:2264
+2018:2:2265
+2019:2:2266
+2020:2:2264
+2021:2:2265
+2022:2:2266
+2023:2:2277
+2024:0:2739
+2025:1:1087
+2026:1:1094
+2027:1:1095
+2028:1:1102
+2029:1:1107
+2030:1:1114
+2031:1:1115
+2032:1:1114
+2033:1:1115
+2034:1:1122
+2035:1:1126
+2036:0:2739
+2037:2:1175
+2038:0:2739
+2039:2:2023
+2040:2:2024
+2041:2:2028
+2042:2:2029
+2043:2:2037
+2044:2:2038
+2045:2:2042
+2046:2:2043
+2047:2:2051
+2048:2:2056
+2049:2:2060
+2050:2:2061
+2051:2:2069
+2052:2:2070
+2053:2:2074
+2054:2:2075
+2055:2:2069
+2056:2:2070
+2057:2:2074
+2058:2:2075
+2059:2:2083
+2060:2:2088
+2061:2:2089
+2062:2:2100
+2063:2:2101
+2064:2:2102
+2065:2:2113
+2066:2:2118
+2067:2:2119
+2068:2:2130
+2069:2:2131
+2070:2:2132
+2071:2:2130
+2072:2:2131
+2073:2:2132
+2074:2:2143
+2075:2:2151
+2076:0:2739
+2077:2:1175
+2078:0:2739
+2079:1:812
+2080:1:813
+2081:0:2739
+2082:1:40
+2083:0:2739
+2084:1:41
+2085:0:2739
+2086:2:2157
+2087:2:2158
+2088:2:2162
+2089:2:2163
+2090:2:2171
+2091:2:2172
+2092:2:2176
+2093:2:2177
+2094:2:2185
+2095:2:2190
+2096:2:2194
+2097:2:2195
+2098:2:2203
+2099:2:2204
+2100:2:2208
+2101:2:2209
+2102:2:2203
+2103:2:2204
+2104:2:2208
+2105:2:2209
+2106:2:2217
+2107:2:2222
+2108:2:2223
+2109:2:2234
+2110:2:2235
+2111:2:2236
+2112:2:2247
+2113:2:2252
+2114:2:2253
+2115:2:2264
+2116:2:2265
+2117:2:2266
+2118:2:2264
+2119:2:2265
+2120:2:2266
+2121:2:2277
+2122:0:2739
+2123:2:1175
+2124:0:2739
+2125:2:2023
+2126:2:2024
+2127:2:2028
+2128:2:2029
+2129:2:2037
+2130:2:2038
+2131:2:2042
+2132:2:2043
+2133:2:2051
+2134:2:2056
+2135:2:2060
+2136:2:2061
+2137:2:2069
+2138:2:2070
+2139:2:2074
+2140:2:2075
+2141:2:2069
+2142:2:2070
+2143:2:2074
+2144:2:2075
+2145:2:2083
+2146:2:2088
+2147:2:2089
+2148:2:2100
+2149:2:2101
+2150:2:2102
+2151:2:2113
+2152:2:2118
+2153:2:2119
+2154:2:2130
+2155:2:2131
+2156:2:2132
+2157:2:2130
+2158:2:2131
+2159:2:2132
+2160:2:2143
+2161:2:2151
+2162:0:2739
+2163:1:42
+2164:0:2739
+2165:2:1175
+2166:0:2739
+2167:2:2157
+2168:2:2158
+2169:2:2162
+2170:2:2163
+2171:2:2171
+2172:2:2172
+2173:2:2176
+2174:2:2177
+2175:2:2185
+2176:2:2190
+2177:2:2194
+2178:2:2195
+2179:2:2203
+2180:2:2204
+2181:2:2208
+2182:2:2209
+2183:2:2203
+2184:2:2204
+2185:2:2208
+2186:2:2209
+2187:2:2217
+2188:2:2222
+2189:2:2223
+2190:2:2234
+2191:2:2235
+2192:2:2236
+2193:2:2247
+2194:2:2252
+2195:2:2253
+2196:2:2264
+2197:2:2265
+2198:2:2266
+2199:2:2264
+2200:2:2265
+2201:2:2266
+2202:2:2277
+2203:0:2739
+2204:2:1175
+2205:0:2739
+2206:1:143
+2207:0:2739
+2208:2:2023
+2209:2:2024
+2210:2:2028
+2211:2:2029
+2212:2:2037
+2213:2:2038
+2214:2:2042
+2215:2:2043
+2216:2:2051
+2217:2:2056
+2218:2:2060
+2219:2:2061
+2220:2:2069
+2221:2:2070
+2222:2:2074
+2223:2:2075
+2224:2:2069
+2225:2:2070
+2226:2:2074
+2227:2:2075
+2228:2:2083
+2229:2:2088
+2230:2:2089
+2231:2:2100
+2232:2:2101
+2233:2:2102
+2234:2:2113
+2235:2:2118
+2236:2:2119
+2237:2:2130
+2238:2:2131
+2239:2:2132
+2240:2:2130
+2241:2:2131
+2242:2:2132
+2243:2:2143
+2244:2:2151
+2245:0:2739
+2246:2:1175
+2247:0:2739
+2248:2:2157
+2249:2:2158
+2250:2:2162
+2251:2:2163
+2252:2:2171
+2253:2:2172
+2254:2:2176
+2255:2:2177
+2256:2:2185
+2257:2:2190
+2258:2:2194
+2259:2:2195
+2260:2:2203
+2261:2:2204
+2262:2:2208
+2263:2:2209
+2264:2:2203
+2265:2:2204
+2266:2:2208
+2267:2:2209
+2268:2:2217
+2269:2:2222
+2270:2:2223
+2271:2:2234
+2272:2:2235
+2273:2:2236
+2274:2:2247
+2275:2:2252
+2276:2:2253
+2277:2:2264
+2278:2:2265
+2279:2:2266
+2280:2:2264
+2281:2:2265
+2282:2:2266
+2283:2:2277
+2284:0:2739
+2285:1:145
+2286:0:2739
+2287:2:1175
+2288:0:2739
+2289:2:2023
+2290:2:2024
+2291:2:2028
+2292:2:2029
+2293:2:2037
+2294:2:2038
+2295:2:2042
+2296:2:2043
+2297:2:2051
+2298:2:2056
+2299:2:2060
+2300:2:2061
+2301:2:2069
+2302:2:2070
+2303:2:2074
+2304:2:2075
+2305:2:2069
+2306:2:2070
+2307:2:2074
+2308:2:2075
+2309:2:2083
+2310:2:2088
+2311:2:2089
+2312:2:2100
+2313:2:2101
+2314:2:2102
+2315:2:2113
+2316:2:2118
+2317:2:2119
+2318:2:2130
+2319:2:2131
+2320:2:2132
+2321:2:2130
+2322:2:2131
+2323:2:2132
+2324:2:2143
+2325:2:2151
+2326:0:2739
+2327:2:1175
+2328:0:2739
+2329:1:44
+2330:0:2739
+2331:2:2157
+2332:2:2158
+2333:2:2162
+2334:2:2163
+2335:2:2171
+2336:2:2172
+2337:2:2176
+2338:2:2177
+2339:2:2185
+2340:2:2190
+2341:2:2194
+2342:2:2195
+2343:2:2203
+2344:2:2204
+2345:2:2208
+2346:2:2209
+2347:2:2203
+2348:2:2204
+2349:2:2208
+2350:2:2209
+2351:2:2217
+2352:2:2222
+2353:2:2223
+2354:2:2234
+2355:2:2235
+2356:2:2236
+2357:2:2247
+2358:2:2252
+2359:2:2253
+2360:2:2264
+2361:2:2265
+2362:2:2266
+2363:2:2264
+2364:2:2265
+2365:2:2266
+2366:2:2277
+2367:0:2739
+2368:2:1175
+2369:0:2739
+2370:2:2023
+2371:2:2024
+2372:2:2028
+2373:2:2029
+2374:2:2037
+2375:2:2038
+2376:2:2042
+2377:2:2043
+2378:2:2051
+2379:2:2056
+2380:2:2060
+2381:2:2061
+2382:2:2069
+2383:2:2070
+2384:2:2074
+2385:2:2075
+2386:2:2069
+2387:2:2070
+2388:2:2074
+2389:2:2075
+2390:2:2083
+2391:2:2088
+2392:2:2089
+2393:2:2100
+2394:2:2101
+2395:2:2102
+2396:2:2113
+2397:2:2118
+2398:2:2119
+2399:2:2130
+2400:2:2131
+2401:2:2132
+2402:2:2130
+2403:2:2131
+2404:2:2132
+2405:2:2143
+2406:2:2151
+2407:0:2739
+2408:1:816
+2409:1:817
+2410:1:821
+2411:1:822
+2412:1:830
+2413:1:831
+2414:1:835
+2415:1:836
+2416:1:844
+2417:1:849
+2418:1:853
+2419:1:854
+2420:1:862
+2421:1:863
+2422:1:867
+2423:1:868
+2424:1:862
+2425:1:863
+2426:1:867
+2427:1:868
+2428:1:876
+2429:1:881
+2430:1:882
+2431:1:893
+2432:1:894
+2433:1:895
+2434:1:906
+2435:1:911
+2436:1:912
+2437:1:923
+2438:1:924
+2439:1:925
+2440:1:923
+2441:1:924
+2442:1:925
+2443:1:936
+2444:0:2739
+2445:2:1175
+2446:0:2739
+2447:1:40
+2448:0:2739
+2449:1:41
+2450:0:2739
+2451:2:2157
+2452:2:2158
+2453:2:2162
+2454:2:2163
+2455:2:2171
+2456:2:2172
+2457:2:2176
+2458:2:2177
+2459:2:2185
+2460:2:2190
+2461:2:2194
+2462:2:2195
+2463:2:2203
+2464:2:2204
+2465:2:2208
+2466:2:2209
+2467:2:2203
+2468:2:2204
+2469:2:2208
+2470:2:2209
+2471:2:2217
+2472:2:2222
+2473:2:2223
+2474:2:2234
+2475:2:2235
+2476:2:2236
+2477:2:2247
+2478:2:2252
+2479:2:2253
+2480:2:2264
+2481:2:2265
+2482:2:2266
+2483:2:2264
+2484:2:2265
+2485:2:2266
+2486:2:2277
+2487:0:2739
+2488:2:1175
+2489:0:2739
+2490:2:2023
+2491:2:2024
+2492:2:2028
+2493:2:2029
+2494:2:2037
+2495:2:2038
+2496:2:2042
+2497:2:2043
+2498:2:2051
+2499:2:2056
+2500:2:2060
+2501:2:2061
+2502:2:2069
+2503:2:2070
+2504:2:2074
+2505:2:2075
+2506:2:2069
+2507:2:2070
+2508:2:2074
+2509:2:2075
+2510:2:2083
+2511:2:2088
+2512:2:2089
+2513:2:2100
+2514:2:2101
+2515:2:2102
+2516:2:2113
+2517:2:2118
+2518:2:2119
+2519:2:2130
+2520:2:2131
+2521:2:2132
+2522:2:2130
+2523:2:2131
+2524:2:2132
+2525:2:2143
+2526:2:2151
+2527:0:2739
+2528:1:42
+2529:0:2739
+2530:2:1175
+2531:0:2739
+2532:2:2157
+2533:2:2158
+2534:2:2162
+2535:2:2163
+2536:2:2171
+2537:2:2172
+2538:2:2176
+2539:2:2177
+2540:2:2185
+2541:2:2190
+2542:2:2194
+2543:2:2195
+2544:2:2203
+2545:2:2204
+2546:2:2208
+2547:2:2209
+2548:2:2203
+2549:2:2204
+2550:2:2208
+2551:2:2209
+2552:2:2217
+2553:2:2222
+2554:2:2223
+2555:2:2234
+2556:2:2235
+2557:2:2236
+2558:2:2247
+2559:2:2252
+2560:2:2253
+2561:2:2264
+2562:2:2265
+2563:2:2266
+2564:2:2264
+2565:2:2265
+2566:2:2266
+2567:2:2277
+2568:0:2739
+2569:2:1175
+2570:0:2739
+2571:1:143
+2572:0:2739
+2573:2:2023
+2574:2:2024
+2575:2:2028
+2576:2:2029
+2577:2:2037
+2578:2:2038
+2579:2:2042
+2580:2:2043
+2581:2:2051
+2582:2:2056
+2583:2:2060
+2584:2:2061
+2585:2:2069
+2586:2:2070
+2587:2:2074
+2588:2:2075
+2589:2:2069
+2590:2:2070
+2591:2:2074
+2592:2:2075
+2593:2:2083
+2594:2:2088
+2595:2:2089
+2596:2:2100
+2597:2:2101
+2598:2:2102
+2599:2:2113
+2600:2:2118
+2601:2:2119
+2602:2:2130
+2603:2:2131
+2604:2:2132
+2605:2:2130
+2606:2:2131
+2607:2:2132
+2608:2:2143
+2609:2:2151
+2610:0:2739
+2611:2:1175
+2612:0:2739
+2613:2:2157
+2614:2:2158
+2615:2:2162
+2616:2:2163
+2617:2:2171
+2618:2:2172
+2619:2:2176
+2620:2:2177
+2621:2:2185
+2622:2:2190
+2623:2:2194
+2624:2:2195
+2625:2:2203
+2626:2:2204
+2627:2:2208
+2628:2:2209
+2629:2:2203
+2630:2:2204
+2631:2:2208
+2632:2:2209
+2633:2:2217
+2634:2:2222
+2635:2:2223
+2636:2:2234
+2637:2:2235
+2638:2:2236
+2639:2:2247
+2640:2:2252
+2641:2:2253
+2642:2:2264
+2643:2:2265
+2644:2:2266
+2645:2:2264
+2646:2:2265
+2647:2:2266
+2648:2:2277
+2649:0:2739
+2650:1:145
+2651:0:2739
+2652:2:1175
+2653:0:2739
+2654:2:2023
+2655:2:2024
+2656:2:2028
+2657:2:2029
+2658:2:2037
+2659:2:2038
+2660:2:2042
+2661:2:2043
+2662:2:2051
+2663:2:2056
+2664:2:2060
+2665:2:2061
+2666:2:2069
+2667:2:2070
+2668:2:2074
+2669:2:2075
+2670:2:2069
+2671:2:2070
+2672:2:2074
+2673:2:2075
+2674:2:2083
+2675:2:2088
+2676:2:2089
+2677:2:2100
+2678:2:2101
+2679:2:2102
+2680:2:2113
+2681:2:2118
+2682:2:2119
+2683:2:2130
+2684:2:2131
+2685:2:2132
+2686:2:2130
+2687:2:2131
+2688:2:2132
+2689:2:2143
+2690:2:2151
+2691:0:2739
+2692:2:1175
+2693:0:2739
+2694:1:44
+2695:0:2739
+2696:2:2157
+2697:2:2158
+2698:2:2162
+2699:2:2163
+2700:2:2171
+2701:2:2172
+2702:2:2176
+2703:2:2177
+2704:2:2185
+2705:2:2190
+2706:2:2194
+2707:2:2195
+2708:2:2203
+2709:2:2204
+2710:2:2208
+2711:2:2209
+2712:2:2203
+2713:2:2204
+2714:2:2208
+2715:2:2209
+2716:2:2217
+2717:2:2222
+2718:2:2223
+2719:2:2234
+2720:2:2235
+2721:2:2236
+2722:2:2247
+2723:2:2252
+2724:2:2253
+2725:2:2264
+2726:2:2265
+2727:2:2266
+2728:2:2264
+2729:2:2265
+2730:2:2266
+2731:2:2277
+2732:0:2739
+2733:2:1175
+2734:0:2739
+2735:2:2023
+2736:2:2024
+2737:2:2028
+2738:2:2029
+2739:2:2037
+2740:2:2038
+2741:2:2042
+2742:2:2043
+2743:2:2051
+2744:2:2056
+2745:2:2060
+2746:2:2061
+2747:2:2069
+2748:2:2070
+2749:2:2074
+2750:2:2075
+2751:2:2069
+2752:2:2070
+2753:2:2074
+2754:2:2075
+2755:2:2083
+2756:2:2088
+2757:2:2089
+2758:2:2100
+2759:2:2101
+2760:2:2102
+2761:2:2113
+2762:2:2118
+2763:2:2119
+2764:2:2130
+2765:2:2131
+2766:2:2132
+2767:2:2130
+2768:2:2131
+2769:2:2132
+2770:2:2143
+2771:2:2151
+2772:0:2739
+2773:1:945
+2774:1:946
+2775:1:950
+2776:1:951
+2777:1:959
+2778:1:960
+2779:1:964
+2780:1:965
+2781:1:973
+2782:1:978
+2783:1:982
+2784:1:983
+2785:1:991
+2786:1:992
+2787:1:996
+2788:1:997
+2789:1:991
+2790:1:992
+2791:1:996
+2792:1:997
+2793:1:1005
+2794:1:1010
+2795:1:1011
+2796:1:1022
+2797:1:1023
+2798:1:1024
+2799:1:1035
+2800:1:1040
+2801:1:1041
+2802:1:1052
+2803:1:1053
+2804:1:1054
+2805:1:1052
+2806:1:1053
+2807:1:1054
+2808:1:1065
+2809:1:1072
+2810:1:1076
+2811:0:2739
+2812:2:1175
+2813:0:2739
+2814:1:40
+2815:0:2739
+2816:1:41
+2817:0:2739
+2818:2:2157
+2819:2:2158
+2820:2:2162
+2821:2:2163
+2822:2:2171
+2823:2:2172
+2824:2:2176
+2825:2:2177
+2826:2:2185
+2827:2:2190
+2828:2:2194
+2829:2:2195
+2830:2:2203
+2831:2:2204
+2832:2:2208
+2833:2:2209
+2834:2:2203
+2835:2:2204
+2836:2:2208
+2837:2:2209
+2838:2:2217
+2839:2:2222
+2840:2:2223
+2841:2:2234
+2842:2:2235
+2843:2:2236
+2844:2:2247
+2845:2:2252
+2846:2:2253
+2847:2:2264
+2848:2:2265
+2849:2:2266
+2850:2:2264
+2851:2:2265
+2852:2:2266
+2853:2:2277
+2854:0:2739
+2855:2:1175
+2856:0:2739
+2857:2:2023
+2858:2:2024
+2859:2:2028
+2860:2:2029
+2861:2:2037
+2862:2:2038
+2863:2:2042
+2864:2:2043
+2865:2:2051
+2866:2:2056
+2867:2:2060
+2868:2:2061
+2869:2:2069
+2870:2:2070
+2871:2:2074
+2872:2:2075
+2873:2:2069
+2874:2:2070
+2875:2:2074
+2876:2:2075
+2877:2:2083
+2878:2:2088
+2879:2:2089
+2880:2:2100
+2881:2:2101
+2882:2:2102
+2883:2:2113
+2884:2:2118
+2885:2:2119
+2886:2:2130
+2887:2:2131
+2888:2:2132
+2889:2:2130
+2890:2:2131
+2891:2:2132
+2892:2:2143
+2893:2:2151
+2894:0:2739
+2895:1:42
+2896:0:2739
+2897:2:1175
+2898:0:2739
+2899:2:2157
+2900:2:2158
+2901:2:2162
+2902:2:2163
+2903:2:2171
+2904:2:2172
+2905:2:2176
+2906:2:2177
+2907:2:2185
+2908:2:2190
+2909:2:2194
+2910:2:2195
+2911:2:2203
+2912:2:2204
+2913:2:2208
+2914:2:2209
+2915:2:2203
+2916:2:2204
+2917:2:2208
+2918:2:2209
+2919:2:2217
+2920:2:2222
+2921:2:2223
+2922:2:2234
+2923:2:2235
+2924:2:2236
+2925:2:2247
+2926:2:2252
+2927:2:2253
+2928:2:2264
+2929:2:2265
+2930:2:2266
+2931:2:2264
+2932:2:2265
+2933:2:2266
+2934:2:2277
+2935:0:2739
+2936:2:1175
+2937:0:2739
+2938:1:143
+2939:0:2739
+2940:2:2023
+2941:2:2024
+2942:2:2028
+2943:2:2029
+2944:2:2037
+2945:2:2038
+2946:2:2042
+2947:2:2043
+2948:2:2051
+2949:2:2056
+2950:2:2060
+2951:2:2061
+2952:2:2069
+2953:2:2070
+2954:2:2074
+2955:2:2075
+2956:2:2069
+2957:2:2070
+2958:2:2074
+2959:2:2075
+2960:2:2083
+2961:2:2088
+2962:2:2089
+2963:2:2100
+2964:2:2101
+2965:2:2102
+2966:2:2113
+2967:2:2118
+2968:2:2119
+2969:2:2130
+2970:2:2131
+2971:2:2132
+2972:2:2130
+2973:2:2131
+2974:2:2132
+2975:2:2143
+2976:2:2151
+2977:0:2739
+2978:2:1175
+2979:0:2739
+2980:2:2157
+2981:2:2158
+2982:2:2162
+2983:2:2163
+2984:2:2171
+2985:2:2172
+2986:2:2176
+2987:2:2177
+2988:2:2185
+2989:2:2190
+2990:2:2194
+2991:2:2195
+2992:2:2203
+2993:2:2204
+2994:2:2208
+2995:2:2209
+2996:2:2203
+2997:2:2204
+2998:2:2208
+2999:2:2209
+3000:2:2217
+3001:2:2222
+3002:2:2223
+3003:2:2234
+3004:2:2235
+3005:2:2236
+3006:2:2247
+3007:2:2252
+3008:2:2253
+3009:2:2264
+3010:2:2265
+3011:2:2266
+3012:2:2264
+3013:2:2265
+3014:2:2266
+3015:2:2277
+3016:0:2739
+3017:1:145
+3018:0:2739
+3019:2:1175
+3020:0:2739
+3021:2:2023
+3022:2:2024
+3023:2:2028
+3024:2:2029
+3025:2:2037
+3026:2:2038
+3027:2:2042
+3028:2:2043
+3029:2:2051
+3030:2:2056
+3031:2:2060
+3032:2:2061
+3033:2:2069
+3034:2:2070
+3035:2:2074
+3036:2:2075
+3037:2:2069
+3038:2:2070
+3039:2:2074
+3040:2:2075
+3041:2:2083
+3042:2:2088
+3043:2:2089
+3044:2:2100
+3045:2:2101
+3046:2:2102
+3047:2:2113
+3048:2:2118
+3049:2:2119
+3050:2:2130
+3051:2:2131
+3052:2:2132
+3053:2:2130
+3054:2:2131
+3055:2:2132
+3056:2:2143
+3057:2:2151
+3058:0:2739
+3059:2:1175
+3060:0:2739
+3061:1:44
+3062:0:2739
+3063:2:2157
+3064:2:2158
+3065:2:2162
+3066:2:2163
+3067:2:2171
+3068:2:2172
+3069:2:2176
+3070:2:2177
+3071:2:2185
+3072:2:2190
+3073:2:2194
+3074:2:2195
+3075:2:2203
+3076:2:2204
+3077:2:2208
+3078:2:2209
+3079:2:2203
+3080:2:2204
+3081:2:2208
+3082:2:2209
+3083:2:2217
+3084:2:2222
+3085:2:2223
+3086:2:2234
+3087:2:2235
+3088:2:2236
+3089:2:2247
+3090:2:2252
+3091:2:2253
+3092:2:2264
+3093:2:2265
+3094:2:2266
+3095:2:2264
+3096:2:2265
+3097:2:2266
+3098:2:2277
+3099:0:2739
+3100:2:1175
+3101:0:2739
+3102:2:2023
+3103:2:2024
+3104:2:2028
+3105:2:2029
+3106:2:2037
+3107:2:2038
+3108:2:2042
+3109:2:2043
+3110:2:2051
+3111:2:2056
+3112:2:2060
+3113:2:2061
+3114:2:2069
+3115:2:2070
+3116:2:2074
+3117:2:2075
+3118:2:2069
+3119:2:2070
+3120:2:2074
+3121:2:2075
+3122:2:2083
+3123:2:2088
+3124:2:2089
+3125:2:2100
+3126:2:2101
+3127:2:2102
+3128:2:2113
+3129:2:2118
+3130:2:2119
+3131:2:2130
+3132:2:2131
+3133:2:2132
+3134:2:2130
+3135:2:2131
+3136:2:2132
+3137:2:2143
+3138:2:2151
+3139:0:2739
+3140:1:1077
+3141:0:2739
+3142:2:1175
+3143:0:2739
+3144:1:1085
+3145:0:2739
+3146:1:1130
+3147:0:2739
+3148:1:36
+3149:0:2739
+3150:2:2157
+3151:2:2158
+3152:2:2162
+3153:2:2163
+3154:2:2171
+3155:2:2172
+3156:2:2176
+3157:2:2177
+3158:2:2185
+3159:2:2190
+3160:2:2194
+3161:2:2195
+3162:2:2203
+3163:2:2204
+3164:2:2208
+3165:2:2209
+3166:2:2203
+3167:2:2204
+3168:2:2208
+3169:2:2209
+3170:2:2217
+3171:2:2222
+3172:2:2223
+3173:2:2234
+3174:2:2235
+3175:2:2236
+3176:2:2247
+3177:2:2252
+3178:2:2253
+3179:2:2264
+3180:2:2265
+3181:2:2266
+3182:2:2264
+3183:2:2265
+3184:2:2266
+3185:2:2277
+3186:0:2739
+3187:2:1175
+3188:0:2739
+3189:2:2023
+3190:2:2024
+3191:2:2028
+3192:2:2029
+3193:2:2037
+3194:2:2038
+3195:2:2042
+3196:2:2043
+3197:2:2051
+3198:2:2056
+3199:2:2060
+3200:2:2061
+3201:2:2069
+3202:2:2070
+3203:2:2074
+3204:2:2075
+3205:2:2069
+3206:2:2070
+3207:2:2074
+3208:2:2075
+3209:2:2083
+3210:2:2088
+3211:2:2089
+3212:2:2100
+3213:2:2101
+3214:2:2102
+3215:2:2113
+3216:2:2118
+3217:2:2119
+3218:2:2130
+3219:2:2131
+3220:2:2132
+3221:2:2130
+3222:2:2131
+3223:2:2132
+3224:2:2143
+3225:2:2151
+3226:0:2739
+3227:1:37
+3228:0:2739
+3229:2:1175
+3230:0:2739
+3231:2:2157
+3232:2:2158
+3233:2:2162
+3234:2:2163
+3235:2:2171
+3236:2:2172
+3237:2:2176
+3238:2:2177
+3239:2:2185
+3240:2:2190
+3241:2:2194
+3242:2:2195
+3243:2:2203
+3244:2:2204
+3245:2:2208
+3246:2:2209
+3247:2:2203
+3248:2:2204
+3249:2:2208
+3250:2:2209
+3251:2:2217
+3252:2:2222
+3253:2:2223
+3254:2:2234
+3255:2:2235
+3256:2:2236
+3257:2:2247
+3258:2:2252
+3259:2:2253
+3260:2:2264
+3261:2:2265
+3262:2:2266
+3263:2:2264
+3264:2:2265
+3265:2:2266
+3266:2:2277
+3267:0:2739
+3268:2:1175
+3269:0:2739
+3270:1:38
+3271:0:2739
+3272:2:2023
+3273:2:2024
+3274:2:2028
+3275:2:2029
+3276:2:2037
+3277:2:2038
+3278:2:2042
+3279:2:2043
+3280:2:2051
+3281:2:2056
+3282:2:2060
+3283:2:2061
+3284:2:2069
+3285:2:2070
+3286:2:2074
+3287:2:2075
+3288:2:2069
+3289:2:2070
+3290:2:2074
+3291:2:2075
+3292:2:2083
+3293:2:2088
+3294:2:2089
+3295:2:2100
+3296:2:2101
+3297:2:2102
+3298:2:2113
+3299:2:2118
+3300:2:2119
+3301:2:2130
+3302:2:2131
+3303:2:2132
+3304:2:2130
+3305:2:2131
+3306:2:2132
+3307:2:2143
+3308:2:2151
+3309:0:2739
+3310:2:1175
+3311:0:2739
+3312:2:2157
+3313:2:2158
+3314:2:2162
+3315:2:2163
+3316:2:2171
+3317:2:2172
+3318:2:2176
+3319:2:2177
+3320:2:2185
+3321:2:2190
+3322:2:2194
+3323:2:2195
+3324:2:2203
+3325:2:2204
+3326:2:2208
+3327:2:2209
+3328:2:2203
+3329:2:2204
+3330:2:2208
+3331:2:2209
+3332:2:2217
+3333:2:2222
+3334:2:2223
+3335:2:2234
+3336:2:2235
+3337:2:2236
+3338:2:2247
+3339:2:2252
+3340:2:2253
+3341:2:2264
+3342:2:2265
+3343:2:2266
+3344:2:2264
+3345:2:2265
+3346:2:2266
+3347:2:2277
+3348:0:2739
+3349:1:39
+3350:0:2739
+3351:2:1175
+3352:0:2739
+3353:1:40
+3354:0:2739
+3355:1:41
+3356:0:2739
+3357:2:2023
+3358:2:2024
+3359:2:2028
+3360:2:2029
+3361:2:2037
+3362:2:2038
+3363:2:2042
+3364:2:2043
+3365:2:2051
+3366:2:2056
+3367:2:2060
+3368:2:2061
+3369:2:2069
+3370:2:2070
+3371:2:2074
+3372:2:2075
+3373:2:2069
+3374:2:2070
+3375:2:2074
+3376:2:2075
+3377:2:2083
+3378:2:2088
+3379:2:2089
+3380:2:2100
+3381:2:2101
+3382:2:2102
+3383:2:2113
+3384:2:2118
+3385:2:2119
+3386:2:2130
+3387:2:2131
+3388:2:2132
+3389:2:2130
+3390:2:2131
+3391:2:2132
+3392:2:2143
+3393:2:2151
+3394:0:2739
+3395:2:1175
+3396:0:2739
+3397:2:2157
+3398:2:2158
+3399:2:2162
+3400:2:2163
+3401:2:2171
+3402:2:2172
+3403:2:2176
+3404:2:2177
+3405:2:2185
+3406:2:2190
+3407:2:2194
+3408:2:2195
+3409:2:2203
+3410:2:2204
+3411:2:2208
+3412:2:2209
+3413:2:2203
+3414:2:2204
+3415:2:2208
+3416:2:2209
+3417:2:2217
+3418:2:2222
+3419:2:2223
+3420:2:2234
+3421:2:2235
+3422:2:2236
+3423:2:2247
+3424:2:2252
+3425:2:2253
+3426:2:2264
+3427:2:2265
+3428:2:2266
+3429:2:2264
+3430:2:2265
+3431:2:2266
+3432:2:2277
+3433:0:2739
+3434:1:42
+3435:0:2739
+3436:2:1175
+3437:0:2739
+3438:2:2023
+3439:2:2024
+3440:2:2028
+3441:2:2029
+3442:2:2037
+3443:2:2038
+3444:2:2042
+3445:2:2043
+3446:2:2051
+3447:2:2056
+3448:2:2060
+3449:2:2061
+3450:2:2069
+3451:2:2070
+3452:2:2074
+3453:2:2075
+3454:2:2069
+3455:2:2070
+3456:2:2074
+3457:2:2075
+3458:2:2083
+3459:2:2088
+3460:2:2089
+3461:2:2100
+3462:2:2101
+3463:2:2102
+3464:2:2113
+3465:2:2118
+3466:2:2119
+3467:2:2130
+3468:2:2131
+3469:2:2132
+3470:2:2130
+3471:2:2131
+3472:2:2132
+3473:2:2143
+3474:2:2151
+3475:0:2739
+3476:2:1175
+3477:0:2739
+3478:1:143
+3479:0:2739
+3480:2:2157
+3481:2:2158
+3482:2:2162
+3483:2:2163
+3484:2:2171
+3485:2:2172
+3486:2:2176
+3487:2:2177
+3488:2:2185
+3489:2:2190
+3490:2:2194
+3491:2:2195
+3492:2:2203
+3493:2:2204
+3494:2:2208
+3495:2:2209
+3496:2:2203
+3497:2:2204
+3498:2:2208
+3499:2:2209
+3500:2:2217
+3501:2:2222
+3502:2:2223
+3503:2:2234
+3504:2:2235
+3505:2:2236
+3506:2:2247
+3507:2:2252
+3508:2:2253
+3509:2:2264
+3510:2:2265
+3511:2:2266
+3512:2:2264
+3513:2:2265
+3514:2:2266
+3515:2:2277
+3516:0:2739
+3517:2:1175
+3518:0:2739
+3519:2:2023
+3520:2:2024
+3521:2:2028
+3522:2:2029
+3523:2:2037
+3524:2:2038
+3525:2:2042
+3526:2:2043
+3527:2:2051
+3528:2:2056
+3529:2:2060
+3530:2:2061
+3531:2:2069
+3532:2:2070
+3533:2:2074
+3534:2:2075
+3535:2:2069
+3536:2:2070
+3537:2:2074
+3538:2:2075
+3539:2:2083
+3540:2:2088
+3541:2:2089
+3542:2:2100
+3543:2:2101
+3544:2:2102
+3545:2:2113
+3546:2:2118
+3547:2:2119
+3548:2:2130
+3549:2:2131
+3550:2:2132
+3551:2:2130
+3552:2:2131
+3553:2:2132
+3554:2:2143
+3555:2:2151
+3556:0:2739
+3557:1:145
+3558:0:2737
+3559:2:1175
+3560:0:2743
+3561:2:2157
+3562:2:2158
+3563:2:2162
+3564:2:2163
+3565:2:2171
+3566:2:2172
+3567:2:2176
+3568:2:2177
+3569:2:2185
+3570:2:2190
+3571:2:2194
+3572:2:2195
+3573:2:2203
+3574:2:2204
+3575:2:2208
+3576:2:2209
+3577:2:2203
+3578:2:2204
+3579:2:2208
+3580:2:2209
+3581:2:2217
+3582:2:2222
+3583:2:2223
+3584:2:2234
+3585:2:2235
+3586:2:2236
+3587:2:2247
+3588:2:2252
+3589:2:2253
+3590:2:2264
+3591:2:2265
+3592:2:2266
+3593:2:2264
+3594:2:2265
+3595:2:2266
+3596:2:2277
+3597:0:2743
+3598:2:1175
+3599:0:2743
+3600:2:2023
+3601:2:2024
+3602:2:2028
+3603:2:2029
+3604:2:2037
+3605:2:2038
+3606:2:2042
+3607:2:2043
+3608:2:2051
+3609:2:2056
+3610:2:2060
+3611:2:2061
+3612:2:2069
+3613:2:2070
+3614:2:2074
+3615:2:2075
+3616:2:2069
+3617:2:2070
+3618:2:2074
+3619:2:2075
+3620:2:2083
+3621:2:2088
+3622:2:2089
+3623:2:2100
+3624:2:2101
+3625:2:2102
+3626:2:2113
+3627:2:2118
+3628:2:2119
+3629:2:2130
+3630:2:2131
+3631:2:2132
+3632:2:2130
+3633:2:2131
+3634:2:2132
+3635:2:2143
+3636:2:2151
+3637:0:2743
+3638:1:44
+3639:0:2743
+3640:2:1175
+3641:0:2743
+3642:2:2157
+3643:2:2158
+3644:2:2162
+3645:2:2163
+3646:2:2171
+3647:2:2172
+3648:2:2176
+3649:2:2177
+3650:2:2185
+3651:2:2190
+3652:2:2194
+3653:2:2195
+3654:2:2203
+3655:2:2204
+3656:2:2208
+3657:2:2209
+3658:2:2203
+3659:2:2204
+3660:2:2208
+3661:2:2209
+3662:2:2217
+3663:2:2222
+3664:2:2223
+3665:2:2234
+3666:2:2235
+3667:2:2236
+3668:2:2247
+3669:2:2252
+3670:2:2253
+3671:2:2264
+3672:2:2265
+3673:2:2266
+3674:2:2264
+3675:2:2265
+3676:2:2266
+3677:2:2277
+3678:0:2743
+3679:2:1175
+3680:0:2743
+3681:2:2023
+3682:2:2024
+3683:2:2028
+3684:2:2029
+3685:2:2037
+3686:2:2038
+3687:2:2042
+3688:2:2043
+3689:2:2051
+3690:2:2056
+3691:2:2060
+3692:2:2061
+3693:2:2069
+3694:2:2070
+3695:2:2074
+3696:2:2075
+3697:2:2069
+3698:2:2070
+3699:2:2074
+3700:2:2075
+3701:2:2083
+3702:2:2088
+3703:2:2089
+3704:2:2100
+3705:2:2101
+3706:2:2102
+3707:2:2113
+3708:2:2118
+3709:2:2119
+3710:2:2130
+3711:2:2131
+3712:2:2132
+3713:2:2130
+3714:2:2131
+3715:2:2132
+3716:2:2143
+3717:2:2151
+3718:0:2743
+3719:1:681
+3720:1:682
+3721:1:686
+3722:1:687
+3723:1:695
+3724:1:696
+3725:1:697
+3726:1:709
+3727:1:714
+3728:1:718
+3729:1:719
+3730:1:727
+3731:1:728
+3732:1:732
+3733:1:733
+3734:1:727
+3735:1:728
+3736:1:732
+3737:1:733
+3738:1:741
+3739:1:746
+3740:1:747
+3741:1:758
+3742:1:759
+3743:1:760
+3744:1:771
+3745:1:776
+3746:1:777
+3747:1:788
+3748:1:789
+3749:1:790
+3750:1:788
+3751:1:789
+3752:1:790
+3753:1:801
+3754:0:2743
+3755:2:1175
+3756:0:2743
+3757:1:40
+3758:0:2743
+3759:1:41
+3760:0:2743
+3761:2:2157
+3762:2:2158
+3763:2:2162
+3764:2:2163
+3765:2:2171
+3766:2:2172
+3767:2:2176
+3768:2:2177
+3769:2:2185
+3770:2:2190
+3771:2:2194
+3772:2:2195
+3773:2:2203
+3774:2:2204
+3775:2:2208
+3776:2:2209
+3777:2:2203
+3778:2:2204
+3779:2:2208
+3780:2:2209
+3781:2:2217
+3782:2:2222
+3783:2:2223
+3784:2:2234
+3785:2:2242
+3786:2:2243
+3787:2:2247
+3788:2:2252
+3789:2:2253
+3790:2:2264
+3791:2:2265
+3792:2:2266
+3793:2:2264
+3794:2:2265
+3795:2:2266
+3796:2:2277
+3797:0:2743
+3798:2:1175
+-1:-1:-1
+3799:0:2743
+3800:2:2023
+3801:2:2024
+3802:2:2028
+3803:2:2029
+3804:2:2037
+3805:2:2038
+3806:2:2042
+3807:2:2043
+3808:2:2051
+3809:2:2056
+3810:2:2060
+3811:2:2061
+3812:2:2069
+3813:2:2070
+3814:2:2074
+3815:2:2075
+3816:2:2069
+3817:2:2070
+3818:2:2074
+3819:2:2075
+3820:2:2083
+3821:2:2088
+3822:2:2089
+3823:2:2100
+3824:2:2108
+3825:2:2109
+3826:2:2113
+3827:2:2118
+3828:2:2119
+3829:2:2130
+3830:2:2131
+3831:2:2132
+3832:2:2130
+3833:2:2131
+3834:2:2132
+3835:2:2143
+3836:2:2151
+3837:0:2743
+3838:2:1175
+3839:0:2743
+3840:2:2157
+3841:2:2158
+3842:2:2162
+3843:2:2163
+3844:2:2171
+3845:2:2172
+3846:2:2176
+3847:2:2177
+3848:2:2185
+3849:2:2190
+3850:2:2194
+3851:2:2195
+3852:2:2203
+3853:2:2204
+3854:2:2208
+3855:2:2209
+3856:2:2203
+3857:2:2204
+3858:2:2208
+3859:2:2209
+3860:2:2217
+3861:2:2222
+3862:2:2223
+3863:2:2234
+3864:2:2242
+3865:2:2243
+3866:2:2247
+3867:2:2252
+3868:2:2253
+3869:2:2264
+3870:2:2265
+3871:2:2266
+3872:2:2264
+3873:2:2265
+3874:2:2266
+3875:2:2277
+3876:0:2743
+3877:2:1175
+3878:0:2743
+3879:2:2023
+3880:2:2024
+3881:2:2028
+3882:2:2029
+3883:2:2037
+3884:2:2038
+3885:2:2042
+3886:2:2043
+3887:2:2051
+3888:2:2056
+3889:2:2060
+3890:2:2061
+3891:2:2069
+3892:2:2070
+3893:2:2074
+3894:2:2075
+3895:2:2069
+3896:2:2070
+3897:2:2074
+3898:2:2075
+3899:2:2083
+3900:2:2088
+3901:2:2089
+3902:2:2100
+3903:2:2108
+3904:2:2109
+3905:2:2113
+3906:2:2118
+3907:2:2119
+3908:2:2130
+3909:2:2131
+3910:2:2132
+3911:2:2130
+3912:2:2131
+3913:2:2132
+3914:2:2143
+3915:2:2151
+3916:0:2743
+3917:2:1175
+3918:0:2743
+3919:2:2157
+3920:2:2158
+3921:2:2162
+3922:2:2163
+3923:2:2171
+3924:2:2172
+3925:2:2176
+3926:2:2177
+3927:2:2185
+3928:2:2190
+3929:2:2194
+3930:2:2195
+3931:2:2203
+3932:2:2204
+3933:2:2208
+3934:2:2209
+3935:2:2203
+3936:2:2204
+3937:2:2208
+3938:2:2209
+3939:2:2217
+3940:2:2222
+3941:2:2223
+3942:2:2234
+3943:2:2242
+3944:2:2243
+3945:2:2247
+3946:2:2252
+3947:2:2253
+3948:2:2264
+3949:2:2265
+3950:2:2266
+3951:2:2264
+3952:2:2265
+3953:2:2266
+3954:2:2277
+3955:0:2743
+3956:2:1175
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/.input.spin b/formal-model/urcu-controldataflow-alpha-ipi/.input.spin
new file mode 100644 (file)
index 0000000..ca70e6c
--- /dev/null
@@ -0,0 +1,1339 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
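+/*
+ * Illustrative note (added sketch, not part of the original model text): with
+ * RCU_GP_CTR_BIT = (1 << 7) = 128, a reader count of 129 decomposes into a
+ * nesting level of 1 (129 & RCU_GP_CTR_NEST_MASK == 1) observed during the
+ * flipped grace-period phase (129 & RCU_GP_CTR_BIT != 0).
+ */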
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
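+/*
+ * Illustrative sketch (not part of the verified model): how a statement with
+ * a single input dependency is typically expressed with the macros above. The
+ * token names EX_TOKEN_A/EX_TOKEN_B, the state word flow_state and the
+ * variable some_var are hypothetical, for illustration only:
+ *
+ *	:: CONSUME_TOKENS(flow_state, EX_TOKEN_A, EX_TOKEN_B) ->
+ *		// enabled once A has executed and B has not executed yet
+ *		tmp = READ_CACHED_VAR(some_var);
+ *		PRODUCE_TOKENS(flow_state, EX_TOKEN_B);
+ *
+ * A statement is therefore enabled only when all of its input tokens are
+ * present and its own output token is still absent, which lets Spin explore
+ * out-of-order interleavings while still respecting declared dependencies.
+ */
+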
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it can still be required when
+ * writing multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can still
+ * be reordered by the CPU's instruction scheduling, so this cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
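+/*
+ * Hedged illustration (not part of the verified model) of how the dependency
+ * kinds above translate into consume-token sets in this framework; the token
+ * names below are hypothetical:
+ *
+ * - RAW: the reading statement consumes the token produced by the statement
+ *   that wrote the variable, e.g.
+ *	:: CONSUME_TOKENS(state, EX_WRITE_V, EX_READ_V) -> ... read v ...
+ * - WAR: the writing statement also lists the token of the earlier read of
+ *   the same variable among its consumed tokens, so the write cannot be
+ *   scheduled before that read.
+ * - Control: a statement inside a branch consumes the token of the branch
+ *   outcome that pre-dominates it; statements after the merge point consume
+ *   tokens produced on every path, mirroring post-dominance.
+ */
+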
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. See
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are updated at random
+ * times. smp_wmb and smp_rmb force cache updates (write and read,
+ * respectively); smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate a dirty cached value to memory (and thus to other caches).
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
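+/*
+ * Illustrative sketch (not part of the verified model): typical life cycle of
+ * a cached variable under this scheme, assuming a hypothetical variable "x":
+ *
+ *	DECLARE_CACHED_VAR(byte, x);		// global memory copy (mem_x)
+ *	DECLARE_PROC_CACHED_VAR(byte, x);	// per-process copy, inside a proctype
+ *	INIT_CACHED_VAR(x, 0);
+ *	INIT_PROC_CACHED_VAR(x, 0);
+ *	WRITE_CACHED_VAR(x, 1);			// updates cached_x, marks it dirty
+ *	CACHE_WRITE_TO_MEM(x, get_pid());	// flush to mem_x (smp_wmb path)
+ *	CACHE_READ_FROM_MEM(x, get_pid());	// refresh from mem_x (smp_rmb path)
+ *
+ * ooo_mem() further below calls the RANDOM_* variants, so flushes and
+ * refreshes may happen at arbitrary points, modelling a weakly ordered
+ * memory subsystem.
+ */
+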
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * barrier signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without ever continuing its
+                * own execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because avoiding it would require a branch, whose performance
+                        * impact in the common case would not be justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop iterations.
+        * However, if the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution onto the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill over into the next loop iteration. Given that our
+                * validation checks whether the data entry read is poisoned,
+                * it is OK not to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops infinitely, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can
+        * verify the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count stays correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/DEFINES b/formal-model/urcu-controldataflow-alpha-ipi/DEFINES
new file mode 100644 (file)
index 0000000..2681f69
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
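+// Descriptive note: bit 7 of urcu_gp_ctr acts as the grace-period phase bit;
+// the bits below it (RCU_GP_CTR_NEST_MASK) count read-side nesting.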
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/Makefile b/formal-model/urcu-controldataflow-alpha-ipi/Makefile
new file mode 100644 (file)
index 0000000..cadd0aa
--- /dev/null
@@ -0,0 +1,172 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64 -DCOLLAPSE
+#CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       make urcu_free | tee urcu_free.log
+       make urcu_free_no_mb | tee urcu_free_no_mb.log
+       make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       make urcu_free_single_flip | tee urcu_free_single_flip.log
+       #state-space over 14GB
+       #make urcu_progress_writer | tee urcu_progress_writer.log
+       #make urcu_progress_reader | tee urcu_progress_reader.log
+       #make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       make asserts | tee asserts.log
+       make summary
+
+#show trail : spin -v -t -N pan.ltl input.spin
+# after each individual make.
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
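+
+# Build pipeline (summary): the scenario's .input.define and DEFINES are
+# prepended to ${SPINFILE} to form .input.spin; spin generates pan.c from it,
+# together with the negated LTL claim assembled in pan.ltl (the asserts target
+# instead builds with -DSAFETY and no claim); gcc compiles ./pan; and the
+# run / run_weak_fair targets perform the search, the latter adding weak
+# fairness (-f) for the progress properties.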
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/asserts.log b/formal-model/urcu-controldataflow-alpha-ipi/asserts.log
new file mode 100644 (file)
index 0000000..6efa47c
--- /dev/null
@@ -0,0 +1,549 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+cat DEFINES > .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w20
+Depth=    6176 States=    1e+06 Transitions= 1.77e+08 Memory=   497.600        t=    218 R=   5e+03
+Depth=    7720 States=    2e+06 Transitions= 3.71e+08 Memory=   528.654        t=    474 R=   4e+03
+Depth=    7720 States=    3e+06 Transitions=  5.8e+08 Memory=   561.955        t=    768 R=   4e+03
+pan: resizing hashtable to -w22..  done
+Depth=    7720 States=    4e+06 Transitions=  7.6e+08 Memory=   627.549        t=    999 R=   4e+03
+Depth=    7720 States=    5e+06 Transitions= 9.44e+08 Memory=   662.217        t= 1.24e+03 R=   4e+03
+Depth=    7720 States=    6e+06 Transitions= 1.35e+09 Memory=   699.619        t= 1.79e+03 R=   3e+03
+Depth=    7720 States=    7e+06 Transitions= 1.79e+09 Memory=   735.361        t= 2.39e+03 R=   3e+03
+Depth=    7720 States=    8e+06 Transitions= 2.11e+09 Memory=   773.545        t= 2.83e+03 R=   3e+03
+Depth=    7720 States=    9e+06 Transitions= 2.49e+09 Memory=   811.143        t= 3.37e+03 R=   3e+03
+pan: resizing hashtable to -w24..  done
+Depth=    7720 States=    1e+07 Transitions= 2.83e+09 Memory=   973.518        t= 3.8e+03 R=   3e+03
+Depth=    7720 States=  1.1e+07 Transitions=  3.2e+09 Memory=  1011.506        t= 4.27e+03 R=   3e+03
+Depth=    7720 States=  1.2e+07 Transitions= 3.59e+09 Memory=  1049.885        t= 4.78e+03 R=   3e+03
+Depth=    7720 States=  1.3e+07 Transitions= 3.81e+09 Memory=  1087.678        t= 5.06e+03 R=   3e+03
+Depth=    7720 States=  1.4e+07 Transitions= 4.12e+09 Memory=  1122.834        t= 5.46e+03 R=   3e+03
+Depth=    7720 States=  1.5e+07 Transitions= 4.35e+09 Memory=  1159.358        t= 5.75e+03 R=   3e+03
+Depth=    7720 States=  1.6e+07 Transitions= 4.88e+09 Memory=  1195.783        t= 6.46e+03 R=   2e+03
+Depth=    7720 States=  1.7e+07 Transitions= 5.67e+09 Memory=  1231.721        t= 7.51e+03 R=   2e+03
+Depth=    7720 States=  1.8e+07 Transitions= 6.31e+09 Memory=  1268.537        t= 8.37e+03 R=   2e+03
+Depth=    7720 States=  1.9e+07 Transitions= 6.77e+09 Memory=  1306.526        t= 8.98e+03 R=   2e+03
+Depth=    7720 States=    2e+07 Transitions= 7.09e+09 Memory=  1345.393        t= 9.41e+03 R=   2e+03
+Depth=    7720 States=  2.1e+07 Transitions= 7.48e+09 Memory=  1383.576        t= 9.93e+03 R=   2e+03
+Depth=    7720 States=  2.2e+07 Transitions= 7.94e+09 Memory=  1421.955        t= 1.06e+04 R=   2e+03
+Depth=    7720 States=  2.3e+07 Transitions= 8.37e+09 Memory=  1459.846        t= 1.11e+04 R=   2e+03
+Depth=    7720 States=  2.4e+07 Transitions= 8.77e+09 Memory=  1497.346        t= 1.17e+04 R=   2e+03
+Depth=    7720 States=  2.5e+07 Transitions= 9.22e+09 Memory=  1535.529        t= 1.23e+04 R=   2e+03
+Depth=    7720 States=  2.6e+07 Transitions= 9.48e+09 Memory=  1574.006        t= 1.26e+04 R=   2e+03
+Depth=    7720 States=  2.7e+07 Transitions= 9.85e+09 Memory=  1612.385        t= 1.31e+04 R=   2e+03
+Depth=    7720 States=  2.8e+07 Transitions= 1.02e+10 Memory=  1650.666        t= 1.37e+04 R=   2e+03
+Depth=    7940 States=  2.9e+07 Transitions= 1.06e+10 Memory=  1688.752        t= 1.41e+04 R=   2e+03
+Depth=    7998 States=    3e+07 Transitions= 1.09e+10 Memory=  1726.936        t= 1.46e+04 R=   2e+03
+Depth=    7998 States=  3.1e+07 Transitions= 1.13e+10 Memory=  1765.315        t= 1.51e+04 R=   2e+03
+Depth=    7998 States=  3.2e+07 Transitions= 1.16e+10 Memory=  1803.498        t= 1.55e+04 R=   2e+03
+Depth=    7998 States=  3.3e+07 Transitions= 1.19e+10 Memory=  1841.682        t= 1.6e+04 R=   2e+03
+Depth=    7998 States=  3.4e+07 Transitions= 1.23e+10 Memory=  1879.963        t= 1.65e+04 R=   2e+03
+pan: resizing hashtable to -w26..  done
+Depth=    7998 States=  3.5e+07 Transitions= 1.26e+10 Memory=  2414.131        t= 1.69e+04 R=   2e+03
+Depth=    7998 States=  3.6e+07 Transitions= 1.29e+10 Memory=  2452.315        t= 1.73e+04 R=   2e+03
+Depth=    7998 States=  3.7e+07 Transitions= 1.32e+10 Memory=  2490.498        t= 1.77e+04 R=   2e+03
+Depth=    7998 States=  3.8e+07 Transitions= 1.35e+10 Memory=  2528.584        t= 1.82e+04 R=   2e+03
+Depth=    7998 States=  3.9e+07 Transitions= 1.39e+10 Memory=  2566.768        t= 1.86e+04 R=   2e+03
+Depth=    7998 States=    4e+07 Transitions= 1.41e+10 Memory=  2604.951        t= 1.89e+04 R=   2e+03
+Depth=    7998 States=  4.1e+07 Transitions= 1.44e+10 Memory=  2643.135        t= 1.93e+04 R=   2e+03
+Depth=    7998 States=  4.2e+07 Transitions= 1.48e+10 Memory=  2682.002        t= 1.98e+04 R=   2e+03
+Depth=    7998 States=  4.3e+07 Transitions= 1.51e+10 Memory=  2720.283        t= 2.03e+04 R=   2e+03
+Depth=    7998 States=  4.4e+07 Transitions= 1.56e+10 Memory=  2759.053        t= 2.09e+04 R=   2e+03
+Depth=    7998 States=  4.5e+07 Transitions= 1.59e+10 Memory=  2797.432        t= 2.13e+04 R=   2e+03
+Depth=    7998 States=  4.6e+07 Transitions= 1.64e+10 Memory=  2836.201        t= 2.19e+04 R=   2e+03
+Depth=    7998 States=  4.7e+07 Transitions= 1.68e+10 Memory=  2875.068        t= 2.24e+04 R=   2e+03
+Depth=    7998 States=  4.8e+07 Transitions= 1.72e+10 Memory=  2913.643        t= 2.29e+04 R=   2e+03
+Depth=    7998 States=  4.9e+07 Transitions= 1.76e+10 Memory=  2952.412        t= 2.34e+04 R=   2e+03
+Depth=    7998 States=    5e+07 Transitions= 1.78e+10 Memory=  2989.619        t= 2.38e+04 R=   2e+03
+Depth=    7998 States=  5.1e+07 Transitions= 1.81e+10 Memory=  3027.901        t= 2.42e+04 R=   2e+03
+Depth=    7998 States=  5.2e+07 Transitions= 1.84e+10 Memory=  3066.279        t= 2.46e+04 R=   2e+03
+Depth=    7998 States=  5.3e+07 Transitions= 1.87e+10 Memory=  3104.463        t= 2.49e+04 R=   2e+03
+Depth=    7998 States=  5.4e+07 Transitions= 1.93e+10 Memory=  3142.842        t= 2.57e+04 R=   2e+03
+Depth=    7998 States=  5.5e+07 Transitions= 2.01e+10 Memory=  3181.026        t= 2.68e+04 R=   2e+03
+Depth=    7998 States=  5.6e+07 Transitions= 2.07e+10 Memory=  3219.990        t= 2.76e+04 R=   2e+03
+Depth=    7998 States=  5.7e+07 Transitions= 2.11e+10 Memory=  3258.467        t= 2.82e+04 R=   2e+03
+Depth=    7998 States=  5.8e+07 Transitions= 2.15e+10 Memory=  3297.236        t= 2.87e+04 R=   2e+03
+Depth=    7998 States=  5.9e+07 Transitions= 2.18e+10 Memory=  3334.151        t= 2.91e+04 R=   2e+03
+Depth=    7998 States=    6e+07 Transitions= 2.22e+10 Memory=  3372.432        t= 2.97e+04 R=   2e+03
+Depth=    7998 States=  6.1e+07 Transitions= 2.27e+10 Memory=  3410.713        t= 3.03e+04 R=   2e+03
+Depth=    7998 States=  6.2e+07 Transitions= 2.32e+10 Memory=  3448.701        t= 3.09e+04 R=   2e+03
+Depth=    7998 States=  6.3e+07 Transitions= 2.35e+10 Memory=  3485.615        t= 3.15e+04 R=   2e+03
+Depth=    7998 States=  6.4e+07 Transitions= 2.38e+10 Memory=  3523.604        t= 3.19e+04 R=   2e+03
+Depth=    7998 States=  6.5e+07 Transitions= 2.42e+10 Memory=  3561.690        t= 3.23e+04 R=   2e+03
+Depth=    7998 States=  6.6e+07 Transitions= 2.46e+10 Memory=  3598.799        t= 3.28e+04 R=   2e+03
+Depth=    7998 States=  6.7e+07 Transitions= 2.49e+10 Memory=  3635.225        t= 3.33e+04 R=   2e+03
+Depth=    7998 States=  6.8e+07 Transitions= 2.53e+10 Memory=  3672.139        t= 3.38e+04 R=   2e+03
+Depth=    7998 States=  6.9e+07 Transitions= 2.56e+10 Memory=  3706.807        t= 3.42e+04 R=   2e+03
+Depth=    7998 States=    7e+07 Transitions= 2.59e+10 Memory=  3743.916        t= 3.47e+04 R=   2e+03
+Depth=    7998 States=  7.1e+07 Transitions= 2.62e+10 Memory=  3781.026        t= 3.51e+04 R=   2e+03
+Depth=    7998 States=  7.2e+07 Transitions= 2.66e+10 Memory=  3818.721        t= 3.56e+04 R=   2e+03
+Depth=    7998 States=  7.3e+07 Transitions= 2.68e+10 Memory=  3855.244        t= 3.59e+04 R=   2e+03
+Depth=    7998 States=  7.4e+07 Transitions= 2.72e+10 Memory=  3892.647        t= 3.64e+04 R=   2e+03
+Depth=    7998 States=  7.5e+07 Transitions= 2.76e+10 Memory=  3930.049        t= 3.69e+04 R=   2e+03
+Depth=    7998 States=  7.6e+07 Transitions= 2.78e+10 Memory=  3966.963        t= 3.72e+04 R=   2e+03
+Depth=    7998 States=  7.7e+07 Transitions= 2.81e+10 Memory=  4003.975        t= 3.77e+04 R=   2e+03
+Depth=    7998 States=  7.8e+07 Transitions= 2.84e+10 Memory=  4041.084        t= 3.8e+04 R=   2e+03
+Depth=    7998 States=  7.9e+07 Transitions= 2.87e+10 Memory=  4078.584        t= 3.84e+04 R=   2e+03
+Depth=    7998 States=    8e+07 Transitions= 2.91e+10 Memory=  4114.815        t= 3.9e+04 R=   2e+03
+Depth=    7998 States=  8.1e+07 Transitions= 2.95e+10 Memory=  4151.240        t= 3.95e+04 R=   2e+03
+Depth=    7998 States=  8.2e+07 Transitions= 2.99e+10 Memory=  4189.131        t=  4e+04 R=   2e+03
+Depth=    7998 States=  8.3e+07 Transitions= 3.03e+10 Memory=  4226.533        t= 4.06e+04 R=   2e+03
+Depth=    7998 States=  8.4e+07 Transitions= 3.07e+10 Memory=  4264.912        t= 4.11e+04 R=   2e+03
+Depth=    7998 States=  8.5e+07 Transitions= 3.11e+10 Memory=  4302.998        t= 4.16e+04 R=   2e+03
+Depth=    7998 States=  8.6e+07 Transitions= 3.15e+10 Memory=  4340.693        t= 4.21e+04 R=   2e+03
+Depth=    7998 States=  8.7e+07 Transitions= 3.19e+10 Memory=  4378.877        t= 4.27e+04 R=   2e+03
+Depth=    7998 States=  8.8e+07 Transitions= 3.23e+10 Memory=  4417.061        t= 4.32e+04 R=   2e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             - (none specified)
+       assertion violations    +
+       cycle checks            - (disabled by -DSAFETY)
+       invalid end states      +
+
+State-vector 72 byte, depth reached 7998, errors: 0
+ 88716525 states, stored
+3.2432758e+10 states, matched
+3.2521475e+10 transitions (= stored+matched)
+1.8325967e+11 atomic steps
+hash conflicts: 1.7127982e+10 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 8460.667      equivalent memory usage for states (stored*(State-vector + overhead))
+ 3474.757      actual memory usage for states (compression: 41.07%)
+               state-vector as stored = 13 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+ 4444.111      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 606546 5194 3779 2 ]
+unreached in proctype urcu_reader
+       line 267, ".input.spin", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 275, ".input.spin", state 79, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 104, "(1)"
+       line 248, ".input.spin", state 112, "(1)"
+       line 252, ".input.spin", state 124, "(1)"
+       line 256, ".input.spin", state 132, "(1)"
+       line 406, ".input.spin", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 190, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 223, "(1)"
+       line 433, ".input.spin", state 253, "(1)"
+       line 437, ".input.spin", state 266, "(1)"
+       line 686, ".input.spin", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 406, ".input.spin", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 326, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 359, "(1)"
+       line 433, ".input.spin", state 389, "(1)"
+       line 437, ".input.spin", state 402, "(1)"
+       line 406, ".input.spin", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 455, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 488, "(1)"
+       line 433, ".input.spin", state 518, "(1)"
+       line 437, ".input.spin", state 531, "(1)"
+       line 406, ".input.spin", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 406, ".input.spin", state 556, "(1)"
+       line 406, ".input.spin", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 406, ".input.spin", state 557, "else"
+       line 406, ".input.spin", state 560, "(1)"
+       line 410, ".input.spin", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 410, ".input.spin", state 570, "(1)"
+       line 410, ".input.spin", state 571, "(cache_dirty_urcu_active_readers)"
+       line 410, ".input.spin", state 571, "else"
+       line 410, ".input.spin", state 574, "(1)"
+       line 410, ".input.spin", state 575, "(1)"
+       line 410, ".input.spin", state 575, "(1)"
+       line 408, ".input.spin", state 580, "((i<1))"
+       line 408, ".input.spin", state 580, "((i>=1))"
+       line 415, ".input.spin", state 586, "cache_dirty_rcu_ptr = 0"
+       line 415, ".input.spin", state 588, "(1)"
+       line 415, ".input.spin", state 589, "(cache_dirty_rcu_ptr)"
+       line 415, ".input.spin", state 589, "else"
+       line 415, ".input.spin", state 592, "(1)"
+       line 415, ".input.spin", state 593, "(1)"
+       line 415, ".input.spin", state 593, "(1)"
+       line 419, ".input.spin", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 419, ".input.spin", state 602, "(1)"
+       line 419, ".input.spin", state 603, "(cache_dirty_rcu_data[i])"
+       line 419, ".input.spin", state 603, "else"
+       line 419, ".input.spin", state 606, "(1)"
+       line 419, ".input.spin", state 607, "(1)"
+       line 419, ".input.spin", state 607, "(1)"
+       line 417, ".input.spin", state 612, "((i<2))"
+       line 417, ".input.spin", state 612, "((i>=2))"
+       line 424, ".input.spin", state 619, "(1)"
+       line 424, ".input.spin", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 424, ".input.spin", state 620, "else"
+       line 424, ".input.spin", state 623, "(1)"
+       line 424, ".input.spin", state 624, "(1)"
+       line 424, ".input.spin", state 624, "(1)"
+       line 428, ".input.spin", state 632, "(1)"
+       line 428, ".input.spin", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 428, ".input.spin", state 633, "else"
+       line 428, ".input.spin", state 636, "(1)"
+       line 428, ".input.spin", state 637, "(1)"
+       line 428, ".input.spin", state 637, "(1)"
+       line 426, ".input.spin", state 642, "((i<1))"
+       line 426, ".input.spin", state 642, "((i>=1))"
+       line 433, ".input.spin", state 649, "(1)"
+       line 433, ".input.spin", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 433, ".input.spin", state 650, "else"
+       line 433, ".input.spin", state 653, "(1)"
+       line 433, ".input.spin", state 654, "(1)"
+       line 433, ".input.spin", state 654, "(1)"
+       line 437, ".input.spin", state 662, "(1)"
+       line 437, ".input.spin", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 437, ".input.spin", state 663, "else"
+       line 437, ".input.spin", state 666, "(1)"
+       line 437, ".input.spin", state 667, "(1)"
+       line 437, ".input.spin", state 667, "(1)"
+       line 435, ".input.spin", state 672, "((i<2))"
+       line 435, ".input.spin", state 672, "((i>=2))"
+       line 445, ".input.spin", state 676, "(1)"
+       line 445, ".input.spin", state 676, "(1)"
+       line 686, ".input.spin", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 686, ".input.spin", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 686, ".input.spin", state 681, "(1)"
+       line 406, ".input.spin", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 720, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 753, "(1)"
+       line 433, ".input.spin", state 783, "(1)"
+       line 437, ".input.spin", state 796, "(1)"
+       line 406, ".input.spin", state 824, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 856, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 870, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 889, "(1)"
+       line 433, ".input.spin", state 919, "(1)"
+       line 437, ".input.spin", state 932, "(1)"
+       line 406, ".input.spin", state 953, "cache_dirty_urcu_gp_ctr = 0"
+       line 406, ".input.spin", state 955, "(1)"
+       line 406, ".input.spin", state 956, "(cache_dirty_urcu_gp_ctr)"
+       line 406, ".input.spin", state 956, "else"
+       line 406, ".input.spin", state 959, "(1)"
+       line 410, ".input.spin", state 967, "cache_dirty_urcu_active_readers = 0"
+       line 410, ".input.spin", state 969, "(1)"
+       line 410, ".input.spin", state 970, "(cache_dirty_urcu_active_readers)"
+       line 410, ".input.spin", state 970, "else"
+       line 410, ".input.spin", state 973, "(1)"
+       line 410, ".input.spin", state 974, "(1)"
+       line 410, ".input.spin", state 974, "(1)"
+       line 408, ".input.spin", state 979, "((i<1))"
+       line 408, ".input.spin", state 979, "((i>=1))"
+       line 415, ".input.spin", state 985, "cache_dirty_rcu_ptr = 0"
+       line 415, ".input.spin", state 987, "(1)"
+       line 415, ".input.spin", state 988, "(cache_dirty_rcu_ptr)"
+       line 415, ".input.spin", state 988, "else"
+       line 415, ".input.spin", state 991, "(1)"
+       line 415, ".input.spin", state 992, "(1)"
+       line 415, ".input.spin", state 992, "(1)"
+       line 419, ".input.spin", state 999, "cache_dirty_rcu_data[i] = 0"
+       line 419, ".input.spin", state 1001, "(1)"
+       line 419, ".input.spin", state 1002, "(cache_dirty_rcu_data[i])"
+       line 419, ".input.spin", state 1002, "else"
+       line 419, ".input.spin", state 1005, "(1)"
+       line 419, ".input.spin", state 1006, "(1)"
+       line 419, ".input.spin", state 1006, "(1)"
+       line 417, ".input.spin", state 1011, "((i<2))"
+       line 417, ".input.spin", state 1011, "((i>=2))"
+       line 424, ".input.spin", state 1018, "(1)"
+       line 424, ".input.spin", state 1019, "(!(cache_dirty_urcu_gp_ctr))"
+       line 424, ".input.spin", state 1019, "else"
+       line 424, ".input.spin", state 1022, "(1)"
+       line 424, ".input.spin", state 1023, "(1)"
+       line 424, ".input.spin", state 1023, "(1)"
+       line 428, ".input.spin", state 1031, "(1)"
+       line 428, ".input.spin", state 1032, "(!(cache_dirty_urcu_active_readers))"
+       line 428, ".input.spin", state 1032, "else"
+       line 428, ".input.spin", state 1035, "(1)"
+       line 428, ".input.spin", state 1036, "(1)"
+       line 428, ".input.spin", state 1036, "(1)"
+       line 426, ".input.spin", state 1041, "((i<1))"
+       line 426, ".input.spin", state 1041, "((i>=1))"
+       line 433, ".input.spin", state 1048, "(1)"
+       line 433, ".input.spin", state 1049, "(!(cache_dirty_rcu_ptr))"
+       line 433, ".input.spin", state 1049, "else"
+       line 433, ".input.spin", state 1052, "(1)"
+       line 433, ".input.spin", state 1053, "(1)"
+       line 433, ".input.spin", state 1053, "(1)"
+       line 437, ".input.spin", state 1061, "(1)"
+       line 437, ".input.spin", state 1062, "(!(cache_dirty_rcu_data[i]))"
+       line 437, ".input.spin", state 1062, "else"
+       line 437, ".input.spin", state 1065, "(1)"
+       line 437, ".input.spin", state 1066, "(1)"
+       line 437, ".input.spin", state 1066, "(1)"
+       line 435, ".input.spin", state 1071, "((i<2))"
+       line 435, ".input.spin", state 1071, "((i>=2))"
+       line 445, ".input.spin", state 1075, "(1)"
+       line 445, ".input.spin", state 1075, "(1)"
+       line 694, ".input.spin", state 1079, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 406, ".input.spin", state 1084, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1116, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1130, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1149, "(1)"
+       line 433, ".input.spin", state 1179, "(1)"
+       line 437, ".input.spin", state 1192, "(1)"
+       line 406, ".input.spin", state 1216, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1248, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1262, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1281, "(1)"
+       line 433, ".input.spin", state 1311, "(1)"
+       line 437, ".input.spin", state 1324, "(1)"
+       line 406, ".input.spin", state 1349, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1381, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1395, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1414, "(1)"
+       line 433, ".input.spin", state 1444, "(1)"
+       line 437, ".input.spin", state 1457, "(1)"
+       line 406, ".input.spin", state 1478, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1510, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1524, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1543, "(1)"
+       line 433, ".input.spin", state 1573, "(1)"
+       line 437, ".input.spin", state 1586, "(1)"
+       line 406, ".input.spin", state 1612, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1644, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1658, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1677, "(1)"
+       line 433, ".input.spin", state 1707, "(1)"
+       line 437, ".input.spin", state 1720, "(1)"
+       line 406, ".input.spin", state 1741, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1773, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1787, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1806, "(1)"
+       line 433, ".input.spin", state 1836, "(1)"
+       line 437, ".input.spin", state 1849, "(1)"
+       line 406, ".input.spin", state 1873, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 1905, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 1919, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 1938, "(1)"
+       line 433, ".input.spin", state 1968, "(1)"
+       line 437, ".input.spin", state 1981, "(1)"
+       line 733, ".input.spin", state 2002, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 406, ".input.spin", state 2009, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 2041, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 2055, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 2074, "(1)"
+       line 433, ".input.spin", state 2104, "(1)"
+       line 437, ".input.spin", state 2117, "(1)"
+       line 406, ".input.spin", state 2138, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 2170, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 2184, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 2203, "(1)"
+       line 433, ".input.spin", state 2233, "(1)"
+       line 437, ".input.spin", state 2246, "(1)"
+       line 406, ".input.spin", state 2269, "cache_dirty_urcu_gp_ctr = 0"
+       line 406, ".input.spin", state 2271, "(1)"
+       line 406, ".input.spin", state 2272, "(cache_dirty_urcu_gp_ctr)"
+       line 406, ".input.spin", state 2272, "else"
+       line 406, ".input.spin", state 2275, "(1)"
+       line 410, ".input.spin", state 2283, "cache_dirty_urcu_active_readers = 0"
+       line 410, ".input.spin", state 2285, "(1)"
+       line 410, ".input.spin", state 2286, "(cache_dirty_urcu_active_readers)"
+       line 410, ".input.spin", state 2286, "else"
+       line 410, ".input.spin", state 2289, "(1)"
+       line 410, ".input.spin", state 2290, "(1)"
+       line 410, ".input.spin", state 2290, "(1)"
+       line 408, ".input.spin", state 2295, "((i<1))"
+       line 408, ".input.spin", state 2295, "((i>=1))"
+       line 415, ".input.spin", state 2301, "cache_dirty_rcu_ptr = 0"
+       line 415, ".input.spin", state 2303, "(1)"
+       line 415, ".input.spin", state 2304, "(cache_dirty_rcu_ptr)"
+       line 415, ".input.spin", state 2304, "else"
+       line 415, ".input.spin", state 2307, "(1)"
+       line 415, ".input.spin", state 2308, "(1)"
+       line 415, ".input.spin", state 2308, "(1)"
+       line 419, ".input.spin", state 2315, "cache_dirty_rcu_data[i] = 0"
+       line 419, ".input.spin", state 2317, "(1)"
+       line 419, ".input.spin", state 2318, "(cache_dirty_rcu_data[i])"
+       line 419, ".input.spin", state 2318, "else"
+       line 419, ".input.spin", state 2321, "(1)"
+       line 419, ".input.spin", state 2322, "(1)"
+       line 419, ".input.spin", state 2322, "(1)"
+       line 417, ".input.spin", state 2327, "((i<2))"
+       line 417, ".input.spin", state 2327, "((i>=2))"
+       line 424, ".input.spin", state 2334, "(1)"
+       line 424, ".input.spin", state 2335, "(!(cache_dirty_urcu_gp_ctr))"
+       line 424, ".input.spin", state 2335, "else"
+       line 424, ".input.spin", state 2338, "(1)"
+       line 424, ".input.spin", state 2339, "(1)"
+       line 424, ".input.spin", state 2339, "(1)"
+       line 428, ".input.spin", state 2347, "(1)"
+       line 428, ".input.spin", state 2348, "(!(cache_dirty_urcu_active_readers))"
+       line 428, ".input.spin", state 2348, "else"
+       line 428, ".input.spin", state 2351, "(1)"
+       line 428, ".input.spin", state 2352, "(1)"
+       line 428, ".input.spin", state 2352, "(1)"
+       line 426, ".input.spin", state 2357, "((i<1))"
+       line 426, ".input.spin", state 2357, "((i>=1))"
+       line 433, ".input.spin", state 2364, "(1)"
+       line 433, ".input.spin", state 2365, "(!(cache_dirty_rcu_ptr))"
+       line 433, ".input.spin", state 2365, "else"
+       line 433, ".input.spin", state 2368, "(1)"
+       line 433, ".input.spin", state 2369, "(1)"
+       line 433, ".input.spin", state 2369, "(1)"
+       line 437, ".input.spin", state 2377, "(1)"
+       line 437, ".input.spin", state 2378, "(!(cache_dirty_rcu_data[i]))"
+       line 437, ".input.spin", state 2378, "else"
+       line 437, ".input.spin", state 2381, "(1)"
+       line 437, ".input.spin", state 2382, "(1)"
+       line 437, ".input.spin", state 2382, "(1)"
+       line 435, ".input.spin", state 2387, "((i<2))"
+       line 435, ".input.spin", state 2387, "((i>=2))"
+       line 445, ".input.spin", state 2391, "(1)"
+       line 445, ".input.spin", state 2391, "(1)"
+       line 733, ".input.spin", state 2394, "cached_urcu_active_readers = (tmp+1)"
+       line 733, ".input.spin", state 2395, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 733, ".input.spin", state 2396, "(1)"
+       line 406, ".input.spin", state 2403, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 2435, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 2449, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 2468, "(1)"
+       line 433, ".input.spin", state 2498, "(1)"
+       line 437, ".input.spin", state 2511, "(1)"
+       line 406, ".input.spin", state 2538, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 2570, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 2584, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 2603, "(1)"
+       line 433, ".input.spin", state 2633, "(1)"
+       line 437, ".input.spin", state 2646, "(1)"
+       line 406, ".input.spin", state 2667, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, ".input.spin", state 2699, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 2713, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 2732, "(1)"
+       line 433, ".input.spin", state 2762, "(1)"
+       line 437, ".input.spin", state 2775, "(1)"
+       line 244, ".input.spin", state 2808, "(1)"
+       line 252, ".input.spin", state 2828, "(1)"
+       line 256, ".input.spin", state 2836, "(1)"
+       line 244, ".input.spin", state 2851, "(1)"
+       line 252, ".input.spin", state 2871, "(1)"
+       line 256, ".input.spin", state 2879, "(1)"
+       line 928, ".input.spin", state 2896, "-end-"
+       (245 of 2896 states)
+unreached in proctype urcu_writer
+       line 406, ".input.spin", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 410, ".input.spin", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 415, ".input.spin", state 77, "cache_dirty_rcu_ptr = 0"
+       line 424, ".input.spin", state 110, "(1)"
+       line 428, ".input.spin", state 123, "(1)"
+       line 433, ".input.spin", state 140, "(1)"
+       line 267, ".input.spin", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 198, "cache_dirty_rcu_ptr = 0"
+       line 406, ".input.spin", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 410, ".input.spin", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 415, ".input.spin", state 270, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 424, ".input.spin", state 303, "(1)"
+       line 428, ".input.spin", state 316, "(1)"
+       line 433, ".input.spin", state 333, "(1)"
+       line 437, ".input.spin", state 346, "(1)"
+       line 410, ".input.spin", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 415, ".input.spin", state 401, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 428, ".input.spin", state 447, "(1)"
+       line 433, ".input.spin", state 464, "(1)"
+       line 437, ".input.spin", state 477, "(1)"
+       line 410, ".input.spin", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 415, ".input.spin", state 540, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 428, ".input.spin", state 586, "(1)"
+       line 433, ".input.spin", state 603, "(1)"
+       line 437, ".input.spin", state 616, "(1)"
+       line 410, ".input.spin", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 415, ".input.spin", state 669, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 428, ".input.spin", state 715, "(1)"
+       line 433, ".input.spin", state 732, "(1)"
+       line 437, ".input.spin", state 745, "(1)"
+       line 410, ".input.spin", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 415, ".input.spin", state 800, "cache_dirty_rcu_ptr = 0"
+       line 419, ".input.spin", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 428, ".input.spin", state 846, "(1)"
+       line 433, ".input.spin", state 863, "(1)"
+       line 437, ".input.spin", state 876, "(1)"
+       line 267, ".input.spin", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 955, "(1)"
+       line 279, ".input.spin", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 978, "(1)"
+       line 248, ".input.spin", state 986, "(1)"
+       line 252, ".input.spin", state 998, "(1)"
+       line 256, ".input.spin", state 1006, "(1)"
+       line 267, ".input.spin", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1084, "(1)"
+       line 248, ".input.spin", state 1092, "(1)"
+       line 252, ".input.spin", state 1104, "(1)"
+       line 256, ".input.spin", state 1112, "(1)"
+       line 271, ".input.spin", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1176, "(1)"
+       line 248, ".input.spin", state 1184, "(1)"
+       line 252, ".input.spin", state 1196, "(1)"
+       line 256, ".input.spin", state 1204, "(1)"
+       line 267, ".input.spin", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1282, "(1)"
+       line 248, ".input.spin", state 1290, "(1)"
+       line 252, ".input.spin", state 1302, "(1)"
+       line 256, ".input.spin", state 1310, "(1)"
+       line 271, ".input.spin", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1374, "(1)"
+       line 248, ".input.spin", state 1382, "(1)"
+       line 252, ".input.spin", state 1394, "(1)"
+       line 256, ".input.spin", state 1402, "(1)"
+       line 267, ".input.spin", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1480, "(1)"
+       line 248, ".input.spin", state 1488, "(1)"
+       line 252, ".input.spin", state 1500, "(1)"
+       line 256, ".input.spin", state 1508, "(1)"
+       line 271, ".input.spin", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1572, "(1)"
+       line 248, ".input.spin", state 1580, "(1)"
+       line 252, ".input.spin", state 1592, "(1)"
+       line 256, ".input.spin", state 1600, "(1)"
+       line 267, ".input.spin", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, ".input.spin", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 275, ".input.spin", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 279, ".input.spin", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 244, ".input.spin", state 1678, "(1)"
+       line 248, ".input.spin", state 1686, "(1)"
+       line 252, ".input.spin", state 1698, "(1)"
+       line 256, ".input.spin", state 1706, "(1)"
+       line 1303, ".input.spin", state 1722, "-end-"
+       (103 of 1722 states)
+unreached in proctype :init:
+       (0 of 28 states)
+
+pan: elapsed time 4.35e+04 seconds
+pan: rate 2039.1355 states/second
+pan: avg transition delay 1.3378e-06 usec
+cp .input.spin asserts.spin.input
+cp .input.spin.trail asserts.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/asserts.spin.input b/formal-model/urcu-controldataflow-alpha-ipi/asserts.spin.input
new file mode 100644 (file)
index 0000000..ca70e6c
--- /dev/null
@@ -0,0 +1,1339 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Bits are left active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
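+
+/*
+ * Illustrative sketch (not part of the original model): with hypothetical
+ * tokens TOK_A and TOK_B, a guard such as
+ *     :: CONSUME_TOKENS(state, TOK_A, TOK_B) ->
+ * only fires once TOK_A has been produced and as long as TOK_B has not been;
+ * the guarded statement then typically ends with
+ *     PRODUCE_TOKENS(state, TOK_B);
+ * to enable its successors, while CLEAR_TOKENS(state, TOK_A | TOK_B)
+ * re-arms a loop body so it may execute again.
+ */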
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
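+
+/*
+ * Hypothetical illustration of the dependency types above (not part of the
+ * model): given the statement sequence "a = x; x = b; x = c; d = x;",
+ * "a = x" -> "x = b" is a WAR (false) dependency, "x = b" -> "x = c" is a
+ * WAW (output) dependency, and "x = c" -> "d = x" is a RAW (true) dependency.
+ */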
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate a dirty cache entry to memory, making it
+ * visible to the other caches.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
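+
+/*
+ * Usage sketch (illustrative only, for a hypothetical cached variable "foo"):
+ * a process writes through its local copy with WRITE_CACHED_VAR(foo, 1),
+ * which marks it dirty; the value only reaches mem_foo once
+ * CACHE_WRITE_TO_MEM(foo, get_pid()) runs, either forced by smp_wmb()/smp_mb()
+ * or performed at random by ooo_mem(); other processes then observe it when
+ * CACHE_READ_FROM_MEM(foo, get_pid()) refreshes their own clean copy.
+ */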
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add core synchronization that does not exist and would
+ * therefore create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing
+ * the memory barriers and their dependencies from the read side. One at a
+ * time (in different verification runs), we make a different instruction
+ * listen for barrier signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without continuing its own
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
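+/*
+ * Illustrative sketch (descriptive only, not part of the model logic): in
+ * REMOTE_BARRIERS mode the writer requests barriers with
+ * smp_mb_send(i, j, progressid), which raises reader_barrier[] and busy-waits,
+ * while the reader polls requests with smp_mb_recv(i, j) at the instruction
+ * chosen for the current verification run. Without REMOTE_BARRIERS,
+ * smp_mb_send() and smp_mb_reader() fall back to smp_mb(i) and smp_mb_recv()
+ * expands to nothing.
+ */
+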
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
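+/*
+ * Token layout sketch (descriptive only): with READ_LOCK_BASE == 1, the
+ * PROCEDURE_READ_LOCK body occupies bits 1 to 4 (READ_PROD_A_READ << 1 up to
+ * READ_PROD_C_IF_TRUE_READ << 1) and its completion token READ_LOCK_OUT is
+ * bit 5. The nested lock, the unlocks and the unrolled copies repeat the
+ * scheme at bases 7, 14, 17, 19 and 28, so the reader tokens span bits 0 to
+ * 29 and READ_PROC_ALL_TOKENS_CLEAR covers them all.
+ */
+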
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the barrier
+                        * because the performance impact of adding a branch to skip it in the common
+                        * case is not justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
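+/*
+ * Descriptive sketch of the modeled update sequence (see the tokens above):
+ * write the new data entry, smp_wmb(), exchange rcu_ptr, smp_mb(), then two
+ * grace-period flips, each followed by a wait on the reader's nesting count
+ * and parity, smp_mb() again, and finally poison (free) the old data entry.
+ */
+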
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks that the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so that the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/references.txt b/formal-model/urcu-controldataflow-alpha-ipi/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu.sh b/formal-model/urcu-controldataflow-alpha-ipi/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w21
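+
+# Hedged sketch (illustrative only; the exact invocations used by this project
+# live in the Makefile): a liveness run would drop -DSAFETY in favour of -DNP
+# and search for non-progress cycles under weak fairness:
+#
+#   spin -a urcu.spin
+#   cc -DNP -o pan pan.c
+#   ./pan -l -f -v -m10000000 -w21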
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu.spin b/formal-model/urcu-controldataflow-alpha-ipi/urcu.spin
new file mode 100644 (file)
index 0000000..8075506
--- /dev/null
@@ -0,0 +1,1321 @@
+/*
+ * urcu.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
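+/*
+ * Usage sketch (illustrative; proc, TOKEN_START, TOKEN_A and TOKEN_B are
+ * hypothetical names): a statement B with a RAW dependency on a statement A
+ * is written as
+ *
+ *     :: CONSUME_TOKENS(proc, TOKEN_START, TOKEN_A) ->
+ *             ... statement A ...
+ *             PRODUCE_TOKENS(proc, TOKEN_A);
+ *     :: CONSUME_TOKENS(proc, TOKEN_A, TOKEN_B) ->
+ *             ... statement B ...
+ *             PRODUCE_TOKENS(proc, TOKEN_B);
+ *
+ * The second argument lists the input tokens a statement waits for, and the
+ * third argument is its own output token, which also disables the guard once
+ * produced, so each statement runs at most once until the tokens are cleared.
+ */
+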
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
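+/*
+ * Small worked example (illustrative; a, b, c, d and x are hypothetical
+ * variables):
+ *
+ *     a = x;      (S1)
+ *     x = b;      (S2)  WAR on x with S1
+ *     x = c;      (S3)  WAW on x with S2
+ *     d = x;      (S4)  RAW on x with S3
+ *
+ * Only the RAW dependency is a true dependency; the WAR and WAW dependencies
+ * could be removed by renaming x, except when x must remain a single OOO
+ * memory model variable, as described above.
+ */
+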
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May commit a dirty cache entry to memory (making the update visible to
+ * other processes), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
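+/*
+ * Illustrative lifecycle of a cached variable x for process id (a sketch of
+ * how the macros above combine; not itself part of the model) :
+ *
+ *   WRITE_CACHED_VAR(x, v);            - update the local copy, mark it dirty
+ *   RANDOM_CACHE_WRITE_TO_MEM(x, id);  - may or may not commit it to mem_x
+ *   CACHE_WRITE_TO_MEM(x, id);         - forced commit, as used by smp_wmb
+ *   CACHE_READ_FROM_MEM(x, id);        - refresh the local copy from mem_x
+ *                                        (only when not dirty), as used by
+ *                                        smp_rmb
+ */
+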
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without continuing its own
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
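+/*
+ * As used throughout this model, CONSUME_TOKENS(proc, consumed, produced)
+ * guards a statement : it is expected to be executable only once every bit of
+ * "consumed" is set in proc and no bit of "produced" is set yet, thereby
+ * encoding the dependency graph between instructions. PRODUCE_TOKENS(proc, tok)
+ * sets the corresponding bits once the statement has executed, and
+ * CLEAR_TOKENS resets them so the next iteration can start over.
+ */
+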
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute at points
+                * where execution appears to follow program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case to skip it would not be justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration's execution from spilling into another's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that our validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Placed after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free.log b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free.log
new file mode 100644 (file)
index 0000000..6b6e98b
--- /dev/null
@@ -0,0 +1,560 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1361)
+Depth=    8053 States=    1e+06 Transitions= 1.77e+08 Memory=   513.029        t=    265 R=   4e+03
+Depth=    9797 States=    2e+06 Transitions= 3.71e+08 Memory=   559.416        t=    575 R=   3e+03
+Depth=    9797 States=    3e+06 Transitions=  5.8e+08 Memory=   605.901        t=    922 R=   3e+03
+pan: resizing hashtable to -w22..  done
+Depth=    9797 States=    4e+06 Transitions=  7.6e+08 Memory=   682.920        t= 1.2e+03 R=   3e+03
+Depth=    9797 States=    5e+06 Transitions= 9.44e+08 Memory=   728.721        t= 1.48e+03 R=   3e+03
+Depth=    9797 States=    6e+06 Transitions= 1.35e+09 Memory=   775.303        t= 2.13e+03 R=   3e+03
+Depth=    9797 States=    7e+06 Transitions= 1.79e+09 Memory=   821.885        t= 2.85e+03 R=   2e+03
+Depth=    9797 States=    8e+06 Transitions= 2.11e+09 Memory=   868.076        t= 3.36e+03 R=   2e+03
+Depth=    9797 States=    9e+06 Transitions= 2.49e+09 Memory=   914.658        t= 4.01e+03 R=   2e+03
+pan: resizing hashtable to -w24..  done
+Depth=    9797 States=    1e+07 Transitions= 2.83e+09 Memory=  1085.529        t= 4.52e+03 R=   2e+03
+Depth=    9797 States=  1.1e+07 Transitions=  3.2e+09 Memory=  1132.697        t= 5.08e+03 R=   2e+03
+Depth=    9797 States=  1.2e+07 Transitions= 3.59e+09 Memory=  1179.670        t= 5.69e+03 R=   2e+03
+Depth=    9797 States=  1.3e+07 Transitions= 3.81e+09 Memory=  1226.838        t= 6.03e+03 R=   2e+03
+Depth=    9797 States=  1.4e+07 Transitions= 4.12e+09 Memory=  1273.029        t= 6.5e+03 R=   2e+03
+Depth=    9797 States=  1.5e+07 Transitions= 4.35e+09 Memory=  1319.123        t= 6.85e+03 R=   2e+03
+Depth=    9797 States=  1.6e+07 Transitions= 4.88e+09 Memory=  1365.608        t= 7.7e+03 R=   2e+03
+Depth=    9797 States=  1.7e+07 Transitions= 5.67e+09 Memory=  1411.506        t= 8.95e+03 R=   2e+03
+Depth=    9797 States=  1.8e+07 Transitions= 6.31e+09 Memory=  1458.479        t= 9.98e+03 R=   2e+03
+Depth=    9797 States=  1.9e+07 Transitions= 6.77e+09 Memory=  1504.963        t= 1.07e+04 R=   2e+03
+Depth=    9797 States=    2e+07 Transitions= 7.09e+09 Memory=  1552.131        t= 1.12e+04 R=   2e+03
+Depth=    9797 States=  2.1e+07 Transitions= 7.48e+09 Memory=  1598.615        t= 1.18e+04 R=   2e+03
+Depth=    9797 States=  2.2e+07 Transitions= 7.94e+09 Memory=  1645.295        t= 1.26e+04 R=   2e+03
+Depth=    9797 States=  2.3e+07 Transitions= 8.37e+09 Memory=  1691.486        t= 1.33e+04 R=   2e+03
+Depth=    9797 States=  2.4e+07 Transitions= 8.77e+09 Memory=  1737.678        t= 1.39e+04 R=   2e+03
+Depth=    9797 States=  2.5e+07 Transitions= 9.22e+09 Memory=  1783.967        t= 1.46e+04 R=   2e+03
+Depth=    9797 States=  2.6e+07 Transitions= 9.48e+09 Memory=  1830.061        t= 1.5e+04 R=   2e+03
+Depth=    9797 States=  2.7e+07 Transitions= 9.85e+09 Memory=  1876.350        t= 1.56e+04 R=   2e+03
+Depth=    9797 States=  2.8e+07 Transitions= 1.02e+10 Memory=  1922.639        t= 1.62e+04 R=   2e+03
+Depth=    9919 States=  2.9e+07 Transitions= 1.06e+10 Memory=  1968.537        t= 1.68e+04 R=   2e+03
+Depth=    9963 States=    3e+07 Transitions= 1.09e+10 Memory=  2014.338        t= 1.74e+04 R=   2e+03
+Depth=    9963 States=  3.1e+07 Transitions= 1.13e+10 Memory=  2060.334        t= 1.79e+04 R=   2e+03
+Depth=    9963 States=  3.2e+07 Transitions= 1.16e+10 Memory=  2106.233        t= 1.85e+04 R=   2e+03
+Depth=    9963 States=  3.3e+07 Transitions= 1.19e+10 Memory=  2152.033        t= 1.9e+04 R=   2e+03
+Depth=    9963 States=  3.4e+07 Transitions= 1.23e+10 Memory=  2198.029        t= 1.96e+04 R=   2e+03
+pan: resizing hashtable to -w26..  done
+Depth=    9963 States=  3.5e+07 Transitions= 1.26e+10 Memory=  2739.912        t=  2e+04 R=   2e+03
+Depth=    9963 States=  3.6e+07 Transitions= 1.29e+10 Memory=  2785.713        t= 2.06e+04 R=   2e+03
+Depth=    9963 States=  3.7e+07 Transitions= 1.32e+10 Memory=  2831.416        t= 2.1e+04 R=   2e+03
+Depth=    9963 States=  3.8e+07 Transitions= 1.35e+10 Memory=  2877.217        t= 2.15e+04 R=   2e+03
+Depth=    9963 States=  3.9e+07 Transitions= 1.39e+10 Memory=  2923.018        t= 2.2e+04 R=   2e+03
+Depth=    9963 States=    4e+07 Transitions= 1.41e+10 Memory=  2968.818        t= 2.24e+04 R=   2e+03
+Depth=    9963 States=  4.1e+07 Transitions= 1.44e+10 Memory=  3014.717        t= 2.29e+04 R=   2e+03
+Depth=    9963 States=  4.2e+07 Transitions= 1.48e+10 Memory=  3061.299        t= 2.35e+04 R=   2e+03
+Depth=    9963 States=  4.3e+07 Transitions= 1.51e+10 Memory=  3107.295        t= 2.4e+04 R=   2e+03
+Depth=    9963 States=  4.4e+07 Transitions= 1.56e+10 Memory=  3153.779        t= 2.48e+04 R=   2e+03
+Depth=    9963 States=  4.5e+07 Transitions= 1.59e+10 Memory=  3199.873        t= 2.53e+04 R=   2e+03
+Depth=    9963 States=  4.6e+07 Transitions= 1.64e+10 Memory=  3246.553        t= 2.59e+04 R=   2e+03
+Depth=    9963 States=  4.7e+07 Transitions= 1.68e+10 Memory=  3293.623        t= 2.66e+04 R=   2e+03
+Depth=    9963 States=  4.8e+07 Transitions= 1.72e+10 Memory=  3339.912        t= 2.72e+04 R=   2e+03
+Depth=    9963 States=  4.9e+07 Transitions= 1.76e+10 Memory=  3386.494        t= 2.78e+04 R=   2e+03
+Depth=    9963 States=    5e+07 Transitions= 1.78e+10 Memory=  3433.076        t= 2.82e+04 R=   2e+03
+Depth=    9963 States=  5.1e+07 Transitions= 1.81e+10 Memory=  3479.072        t= 2.87e+04 R=   2e+03
+Depth=    9963 States=  5.2e+07 Transitions= 1.84e+10 Memory=  3525.068        t= 2.91e+04 R=   2e+03
+Depth=    9963 States=  5.3e+07 Transitions= 1.87e+10 Memory=  3570.869        t= 2.95e+04 R=   2e+03
+Depth=    9963 States=  5.4e+07 Transitions= 1.93e+10 Memory=  3616.865        t= 3.05e+04 R=   2e+03
+Depth=    9963 States=  5.5e+07 Transitions= 2.01e+10 Memory=  3662.764        t= 3.17e+04 R=   2e+03
+Depth=    9963 States=  5.6e+07 Transitions= 2.07e+10 Memory=  3709.541        t= 3.27e+04 R=   2e+03
+Depth=    9963 States=  5.7e+07 Transitions= 2.11e+10 Memory=  3755.635        t= 3.34e+04 R=   2e+03
+Depth=    9963 States=  5.8e+07 Transitions= 2.15e+10 Memory=  3802.315        t= 3.4e+04 R=   2e+03
+Depth=    9963 States=  5.9e+07 Transitions= 2.18e+10 Memory=  3848.408        t= 3.45e+04 R=   2e+03
+Depth=    9963 States=    6e+07 Transitions= 2.22e+10 Memory=  3894.404        t= 3.52e+04 R=   2e+03
+Depth=    9963 States=  6.1e+07 Transitions= 2.27e+10 Memory=  3940.596        t= 3.59e+04 R=   2e+03
+Depth=    9963 States=  6.2e+07 Transitions= 2.32e+10 Memory=  3986.494        t= 3.66e+04 R=   2e+03
+Depth=    9963 States=  6.3e+07 Transitions= 2.35e+10 Memory=  4032.295        t= 3.73e+04 R=   2e+03
+Depth=    9963 States=  6.4e+07 Transitions= 2.38e+10 Memory=  4078.193        t= 3.77e+04 R=   2e+03
+Depth=    9963 States=  6.5e+07 Transitions= 2.42e+10 Memory=  4124.092        t= 3.83e+04 R=   2e+03
+Depth=    9963 States=  6.6e+07 Transitions= 2.46e+10 Memory=  4169.990        t= 3.89e+04 R=   2e+03
+Depth=    9963 States=  6.7e+07 Transitions= 2.49e+10 Memory=  4215.791        t= 3.94e+04 R=   2e+03
+Depth=    9963 States=  6.8e+07 Transitions= 2.53e+10 Memory=  4261.494        t=  4e+04 R=   2e+03
+Depth=    9963 States=  6.9e+07 Transitions= 2.56e+10 Memory=  4307.295        t= 4.04e+04 R=   2e+03
+Depth=    9963 States=    7e+07 Transitions= 2.59e+10 Memory=  4353.096        t= 4.11e+04 R=   2e+03
+Depth=    9963 States=  7.1e+07 Transitions= 2.62e+10 Memory=  4398.897        t= 4.15e+04 R=   2e+03
+Depth=    9963 States=  7.2e+07 Transitions= 2.66e+10 Memory=  4444.697        t= 4.21e+04 R=   2e+03
+Depth=    9963 States=  7.3e+07 Transitions= 2.68e+10 Memory=  4490.498        t= 4.25e+04 R=   2e+03
+Depth=    9963 States=  7.4e+07 Transitions= 2.72e+10 Memory=  4536.299        t= 4.31e+04 R=   2e+03
+Depth=    9963 States=  7.5e+07 Transitions= 2.76e+10 Memory=  4582.002        t= 4.37e+04 R=   2e+03
+Depth=    9963 States=  7.6e+07 Transitions= 2.78e+10 Memory=  4627.803        t= 4.41e+04 R=   2e+03
+Depth=    9963 States=  7.7e+07 Transitions= 2.81e+10 Memory=  4673.604        t= 4.46e+04 R=   2e+03
+Depth=    9963 States=  7.8e+07 Transitions= 2.84e+10 Memory=  4719.404        t= 4.5e+04 R=   2e+03
+Depth=    9963 States=  7.9e+07 Transitions= 2.87e+10 Memory=  4765.205        t= 4.55e+04 R=   2e+03
+Depth=    9963 States=    8e+07 Transitions= 2.91e+10 Memory=  4811.006        t= 4.61e+04 R=   2e+03
+Depth=    9963 States=  8.1e+07 Transitions= 2.95e+10 Memory=  4856.807        t= 4.68e+04 R=   2e+03
+Depth=    9963 States=  8.2e+07 Transitions= 2.99e+10 Memory=  4902.608        t= 4.74e+04 R=   2e+03
+Depth=    9963 States=  8.3e+07 Transitions= 3.03e+10 Memory=  4948.701        t= 4.8e+04 R=   2e+03
+Depth=    9963 States=  8.4e+07 Transitions= 3.07e+10 Memory=  4995.283        t= 4.87e+04 R=   2e+03
+Depth=    9963 States=  8.5e+07 Transitions= 3.11e+10 Memory=  5041.084        t= 4.93e+04 R=   2e+03
+Depth=    9963 States=  8.6e+07 Transitions= 3.15e+10 Memory=  5087.276        t= 4.98e+04 R=   2e+03
+Depth=    9963 States=  8.7e+07 Transitions= 3.19e+10 Memory=  5133.369        t= 5.06e+04 R=   2e+03
+Depth=    9963 States=  8.8e+07 Transitions= 3.23e+10 Memory=  5179.170        t= 5.11e+04 R=   2e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 9963, errors: 0
+ 88716525 states, stored
+3.243293e+10 states, matched
+3.2521646e+10 transitions (= stored+matched)
+1.8325967e+11 atomic steps
+hash conflicts: 1.7134961e+10 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 9814.374      equivalent memory usage for states (stored*(State-vector + overhead))
+ 4243.430      actual memory usage for states (compression: 43.24%)
+               state-vector as stored = 14 byte + 36 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+ 5212.276      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 606546 5194 3779 2 1 ]
+unreached in proctype urcu_reader
+       line 267, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 275, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 104, "(1)"
+       line 248, "pan.___", state 112, "(1)"
+       line 252, "pan.___", state 124, "(1)"
+       line 256, "pan.___", state 132, "(1)"
+       line 406, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 223, "(1)"
+       line 433, "pan.___", state 253, "(1)"
+       line 437, "pan.___", state 266, "(1)"
+       line 686, "pan.___", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 406, "pan.___", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 326, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 359, "(1)"
+       line 433, "pan.___", state 389, "(1)"
+       line 437, "pan.___", state 402, "(1)"
+       line 406, "pan.___", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 455, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 488, "(1)"
+       line 433, "pan.___", state 518, "(1)"
+       line 437, "pan.___", state 531, "(1)"
+       line 406, "pan.___", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 406, "pan.___", state 556, "(1)"
+       line 406, "pan.___", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 406, "pan.___", state 557, "else"
+       line 406, "pan.___", state 560, "(1)"
+       line 410, "pan.___", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 410, "pan.___", state 570, "(1)"
+       line 410, "pan.___", state 571, "(cache_dirty_urcu_active_readers)"
+       line 410, "pan.___", state 571, "else"
+       line 410, "pan.___", state 574, "(1)"
+       line 410, "pan.___", state 575, "(1)"
+       line 410, "pan.___", state 575, "(1)"
+       line 408, "pan.___", state 580, "((i<1))"
+       line 408, "pan.___", state 580, "((i>=1))"
+       line 415, "pan.___", state 586, "cache_dirty_rcu_ptr = 0"
+       line 415, "pan.___", state 588, "(1)"
+       line 415, "pan.___", state 589, "(cache_dirty_rcu_ptr)"
+       line 415, "pan.___", state 589, "else"
+       line 415, "pan.___", state 592, "(1)"
+       line 415, "pan.___", state 593, "(1)"
+       line 415, "pan.___", state 593, "(1)"
+       line 419, "pan.___", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 419, "pan.___", state 602, "(1)"
+       line 419, "pan.___", state 603, "(cache_dirty_rcu_data[i])"
+       line 419, "pan.___", state 603, "else"
+       line 419, "pan.___", state 606, "(1)"
+       line 419, "pan.___", state 607, "(1)"
+       line 419, "pan.___", state 607, "(1)"
+       line 417, "pan.___", state 612, "((i<2))"
+       line 417, "pan.___", state 612, "((i>=2))"
+       line 424, "pan.___", state 619, "(1)"
+       line 424, "pan.___", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 424, "pan.___", state 620, "else"
+       line 424, "pan.___", state 623, "(1)"
+       line 424, "pan.___", state 624, "(1)"
+       line 424, "pan.___", state 624, "(1)"
+       line 428, "pan.___", state 632, "(1)"
+       line 428, "pan.___", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 428, "pan.___", state 633, "else"
+       line 428, "pan.___", state 636, "(1)"
+       line 428, "pan.___", state 637, "(1)"
+       line 428, "pan.___", state 637, "(1)"
+       line 426, "pan.___", state 642, "((i<1))"
+       line 426, "pan.___", state 642, "((i>=1))"
+       line 433, "pan.___", state 649, "(1)"
+       line 433, "pan.___", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 433, "pan.___", state 650, "else"
+       line 433, "pan.___", state 653, "(1)"
+       line 433, "pan.___", state 654, "(1)"
+       line 433, "pan.___", state 654, "(1)"
+       line 437, "pan.___", state 662, "(1)"
+       line 437, "pan.___", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 437, "pan.___", state 663, "else"
+       line 437, "pan.___", state 666, "(1)"
+       line 437, "pan.___", state 667, "(1)"
+       line 437, "pan.___", state 667, "(1)"
+       line 435, "pan.___", state 672, "((i<2))"
+       line 435, "pan.___", state 672, "((i>=2))"
+       line 445, "pan.___", state 676, "(1)"
+       line 445, "pan.___", state 676, "(1)"
+       line 686, "pan.___", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 686, "pan.___", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 686, "pan.___", state 681, "(1)"
+       line 406, "pan.___", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 720, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 753, "(1)"
+       line 433, "pan.___", state 783, "(1)"
+       line 437, "pan.___", state 796, "(1)"
+       line 406, "pan.___", state 824, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 856, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 870, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 889, "(1)"
+       line 433, "pan.___", state 919, "(1)"
+       line 437, "pan.___", state 932, "(1)"
+       line 406, "pan.___", state 953, "cache_dirty_urcu_gp_ctr = 0"
+       line 406, "pan.___", state 955, "(1)"
+       line 406, "pan.___", state 956, "(cache_dirty_urcu_gp_ctr)"
+       line 406, "pan.___", state 956, "else"
+       line 406, "pan.___", state 959, "(1)"
+       line 410, "pan.___", state 967, "cache_dirty_urcu_active_readers = 0"
+       line 410, "pan.___", state 969, "(1)"
+       line 410, "pan.___", state 970, "(cache_dirty_urcu_active_readers)"
+       line 410, "pan.___", state 970, "else"
+       line 410, "pan.___", state 973, "(1)"
+       line 410, "pan.___", state 974, "(1)"
+       line 410, "pan.___", state 974, "(1)"
+       line 408, "pan.___", state 979, "((i<1))"
+       line 408, "pan.___", state 979, "((i>=1))"
+       line 415, "pan.___", state 985, "cache_dirty_rcu_ptr = 0"
+       line 415, "pan.___", state 987, "(1)"
+       line 415, "pan.___", state 988, "(cache_dirty_rcu_ptr)"
+       line 415, "pan.___", state 988, "else"
+       line 415, "pan.___", state 991, "(1)"
+       line 415, "pan.___", state 992, "(1)"
+       line 415, "pan.___", state 992, "(1)"
+       line 419, "pan.___", state 999, "cache_dirty_rcu_data[i] = 0"
+       line 419, "pan.___", state 1001, "(1)"
+       line 419, "pan.___", state 1002, "(cache_dirty_rcu_data[i])"
+       line 419, "pan.___", state 1002, "else"
+       line 419, "pan.___", state 1005, "(1)"
+       line 419, "pan.___", state 1006, "(1)"
+       line 419, "pan.___", state 1006, "(1)"
+       line 417, "pan.___", state 1011, "((i<2))"
+       line 417, "pan.___", state 1011, "((i>=2))"
+       line 424, "pan.___", state 1018, "(1)"
+       line 424, "pan.___", state 1019, "(!(cache_dirty_urcu_gp_ctr))"
+       line 424, "pan.___", state 1019, "else"
+       line 424, "pan.___", state 1022, "(1)"
+       line 424, "pan.___", state 1023, "(1)"
+       line 424, "pan.___", state 1023, "(1)"
+       line 428, "pan.___", state 1031, "(1)"
+       line 428, "pan.___", state 1032, "(!(cache_dirty_urcu_active_readers))"
+       line 428, "pan.___", state 1032, "else"
+       line 428, "pan.___", state 1035, "(1)"
+       line 428, "pan.___", state 1036, "(1)"
+       line 428, "pan.___", state 1036, "(1)"
+       line 426, "pan.___", state 1041, "((i<1))"
+       line 426, "pan.___", state 1041, "((i>=1))"
+       line 433, "pan.___", state 1048, "(1)"
+       line 433, "pan.___", state 1049, "(!(cache_dirty_rcu_ptr))"
+       line 433, "pan.___", state 1049, "else"
+       line 433, "pan.___", state 1052, "(1)"
+       line 433, "pan.___", state 1053, "(1)"
+       line 433, "pan.___", state 1053, "(1)"
+       line 437, "pan.___", state 1061, "(1)"
+       line 437, "pan.___", state 1062, "(!(cache_dirty_rcu_data[i]))"
+       line 437, "pan.___", state 1062, "else"
+       line 437, "pan.___", state 1065, "(1)"
+       line 437, "pan.___", state 1066, "(1)"
+       line 437, "pan.___", state 1066, "(1)"
+       line 435, "pan.___", state 1071, "((i<2))"
+       line 435, "pan.___", state 1071, "((i>=2))"
+       line 445, "pan.___", state 1075, "(1)"
+       line 445, "pan.___", state 1075, "(1)"
+       line 694, "pan.___", state 1079, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 406, "pan.___", state 1084, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1116, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1130, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1149, "(1)"
+       line 433, "pan.___", state 1179, "(1)"
+       line 437, "pan.___", state 1192, "(1)"
+       line 406, "pan.___", state 1216, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1248, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1262, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1281, "(1)"
+       line 433, "pan.___", state 1311, "(1)"
+       line 437, "pan.___", state 1324, "(1)"
+       line 406, "pan.___", state 1349, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1381, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1395, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1414, "(1)"
+       line 433, "pan.___", state 1444, "(1)"
+       line 437, "pan.___", state 1457, "(1)"
+       line 406, "pan.___", state 1478, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1510, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1524, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1543, "(1)"
+       line 433, "pan.___", state 1573, "(1)"
+       line 437, "pan.___", state 1586, "(1)"
+       line 406, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1644, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1658, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1677, "(1)"
+       line 433, "pan.___", state 1707, "(1)"
+       line 437, "pan.___", state 1720, "(1)"
+       line 406, "pan.___", state 1741, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1773, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1787, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1806, "(1)"
+       line 433, "pan.___", state 1836, "(1)"
+       line 437, "pan.___", state 1849, "(1)"
+       line 406, "pan.___", state 1873, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 1905, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 1919, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 1938, "(1)"
+       line 433, "pan.___", state 1968, "(1)"
+       line 437, "pan.___", state 1981, "(1)"
+       line 733, "pan.___", state 2002, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 406, "pan.___", state 2009, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 2041, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 2055, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 2074, "(1)"
+       line 433, "pan.___", state 2104, "(1)"
+       line 437, "pan.___", state 2117, "(1)"
+       line 406, "pan.___", state 2138, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 2170, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 2184, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 2203, "(1)"
+       line 433, "pan.___", state 2233, "(1)"
+       line 437, "pan.___", state 2246, "(1)"
+       line 406, "pan.___", state 2269, "cache_dirty_urcu_gp_ctr = 0"
+       line 406, "pan.___", state 2271, "(1)"
+       line 406, "pan.___", state 2272, "(cache_dirty_urcu_gp_ctr)"
+       line 406, "pan.___", state 2272, "else"
+       line 406, "pan.___", state 2275, "(1)"
+       line 410, "pan.___", state 2283, "cache_dirty_urcu_active_readers = 0"
+       line 410, "pan.___", state 2285, "(1)"
+       line 410, "pan.___", state 2286, "(cache_dirty_urcu_active_readers)"
+       line 410, "pan.___", state 2286, "else"
+       line 410, "pan.___", state 2289, "(1)"
+       line 410, "pan.___", state 2290, "(1)"
+       line 410, "pan.___", state 2290, "(1)"
+       line 408, "pan.___", state 2295, "((i<1))"
+       line 408, "pan.___", state 2295, "((i>=1))"
+       line 415, "pan.___", state 2301, "cache_dirty_rcu_ptr = 0"
+       line 415, "pan.___", state 2303, "(1)"
+       line 415, "pan.___", state 2304, "(cache_dirty_rcu_ptr)"
+       line 415, "pan.___", state 2304, "else"
+       line 415, "pan.___", state 2307, "(1)"
+       line 415, "pan.___", state 2308, "(1)"
+       line 415, "pan.___", state 2308, "(1)"
+       line 419, "pan.___", state 2315, "cache_dirty_rcu_data[i] = 0"
+       line 419, "pan.___", state 2317, "(1)"
+       line 419, "pan.___", state 2318, "(cache_dirty_rcu_data[i])"
+       line 419, "pan.___", state 2318, "else"
+       line 419, "pan.___", state 2321, "(1)"
+       line 419, "pan.___", state 2322, "(1)"
+       line 419, "pan.___", state 2322, "(1)"
+       line 417, "pan.___", state 2327, "((i<2))"
+       line 417, "pan.___", state 2327, "((i>=2))"
+       line 424, "pan.___", state 2334, "(1)"
+       line 424, "pan.___", state 2335, "(!(cache_dirty_urcu_gp_ctr))"
+       line 424, "pan.___", state 2335, "else"
+       line 424, "pan.___", state 2338, "(1)"
+       line 424, "pan.___", state 2339, "(1)"
+       line 424, "pan.___", state 2339, "(1)"
+       line 428, "pan.___", state 2347, "(1)"
+       line 428, "pan.___", state 2348, "(!(cache_dirty_urcu_active_readers))"
+       line 428, "pan.___", state 2348, "else"
+       line 428, "pan.___", state 2351, "(1)"
+       line 428, "pan.___", state 2352, "(1)"
+       line 428, "pan.___", state 2352, "(1)"
+       line 426, "pan.___", state 2357, "((i<1))"
+       line 426, "pan.___", state 2357, "((i>=1))"
+       line 433, "pan.___", state 2364, "(1)"
+       line 433, "pan.___", state 2365, "(!(cache_dirty_rcu_ptr))"
+       line 433, "pan.___", state 2365, "else"
+       line 433, "pan.___", state 2368, "(1)"
+       line 433, "pan.___", state 2369, "(1)"
+       line 433, "pan.___", state 2369, "(1)"
+       line 437, "pan.___", state 2377, "(1)"
+       line 437, "pan.___", state 2378, "(!(cache_dirty_rcu_data[i]))"
+       line 437, "pan.___", state 2378, "else"
+       line 437, "pan.___", state 2381, "(1)"
+       line 437, "pan.___", state 2382, "(1)"
+       line 437, "pan.___", state 2382, "(1)"
+       line 435, "pan.___", state 2387, "((i<2))"
+       line 435, "pan.___", state 2387, "((i>=2))"
+       line 445, "pan.___", state 2391, "(1)"
+       line 445, "pan.___", state 2391, "(1)"
+       line 733, "pan.___", state 2394, "cached_urcu_active_readers = (tmp+1)"
+       line 733, "pan.___", state 2395, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 733, "pan.___", state 2396, "(1)"
+       line 406, "pan.___", state 2403, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 2435, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 2449, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 2468, "(1)"
+       line 433, "pan.___", state 2498, "(1)"
+       line 437, "pan.___", state 2511, "(1)"
+       line 406, "pan.___", state 2538, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 2570, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 2584, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 2603, "(1)"
+       line 433, "pan.___", state 2633, "(1)"
+       line 437, "pan.___", state 2646, "(1)"
+       line 406, "pan.___", state 2667, "cache_dirty_urcu_gp_ctr = 0"
+       line 415, "pan.___", state 2699, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 2713, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 2732, "(1)"
+       line 433, "pan.___", state 2762, "(1)"
+       line 437, "pan.___", state 2775, "(1)"
+       line 244, "pan.___", state 2808, "(1)"
+       line 252, "pan.___", state 2828, "(1)"
+       line 256, "pan.___", state 2836, "(1)"
+       line 244, "pan.___", state 2851, "(1)"
+       line 252, "pan.___", state 2871, "(1)"
+       line 256, "pan.___", state 2879, "(1)"
+       line 928, "pan.___", state 2896, "-end-"
+       (245 of 2896 states)
+unreached in proctype urcu_writer
+       line 406, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 410, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 415, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 424, "pan.___", state 110, "(1)"
+       line 428, "pan.___", state 123, "(1)"
+       line 433, "pan.___", state 140, "(1)"
+       line 267, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 406, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 410, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 415, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 424, "pan.___", state 303, "(1)"
+       line 428, "pan.___", state 316, "(1)"
+       line 433, "pan.___", state 333, "(1)"
+       line 437, "pan.___", state 346, "(1)"
+       line 410, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 415, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 428, "pan.___", state 447, "(1)"
+       line 433, "pan.___", state 464, "(1)"
+       line 437, "pan.___", state 477, "(1)"
+       line 410, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 415, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 428, "pan.___", state 586, "(1)"
+       line 433, "pan.___", state 603, "(1)"
+       line 437, "pan.___", state 616, "(1)"
+       line 410, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 415, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 428, "pan.___", state 715, "(1)"
+       line 433, "pan.___", state 732, "(1)"
+       line 437, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 415, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 419, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 428, "pan.___", state 846, "(1)"
+       line 433, "pan.___", state 863, "(1)"
+       line 437, "pan.___", state 876, "(1)"
+       line 267, "pan.___", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 955, "(1)"
+       line 279, "pan.___", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 978, "(1)"
+       line 248, "pan.___", state 986, "(1)"
+       line 252, "pan.___", state 998, "(1)"
+       line 256, "pan.___", state 1006, "(1)"
+       line 267, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1084, "(1)"
+       line 248, "pan.___", state 1092, "(1)"
+       line 252, "pan.___", state 1104, "(1)"
+       line 256, "pan.___", state 1112, "(1)"
+       line 271, "pan.___", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1176, "(1)"
+       line 248, "pan.___", state 1184, "(1)"
+       line 252, "pan.___", state 1196, "(1)"
+       line 256, "pan.___", state 1204, "(1)"
+       line 267, "pan.___", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1282, "(1)"
+       line 248, "pan.___", state 1290, "(1)"
+       line 252, "pan.___", state 1302, "(1)"
+       line 256, "pan.___", state 1310, "(1)"
+       line 271, "pan.___", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1374, "(1)"
+       line 248, "pan.___", state 1382, "(1)"
+       line 252, "pan.___", state 1394, "(1)"
+       line 256, "pan.___", state 1402, "(1)"
+       line 267, "pan.___", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1480, "(1)"
+       line 248, "pan.___", state 1488, "(1)"
+       line 252, "pan.___", state 1500, "(1)"
+       line 256, "pan.___", state 1508, "(1)"
+       line 271, "pan.___", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1572, "(1)"
+       line 248, "pan.___", state 1580, "(1)"
+       line 252, "pan.___", state 1592, "(1)"
+       line 256, "pan.___", state 1600, "(1)"
+       line 267, "pan.___", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 271, "pan.___", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 275, "pan.___", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 279, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 244, "pan.___", state 1678, "(1)"
+       line 248, "pan.___", state 1686, "(1)"
+       line 252, "pan.___", state 1698, "(1)"
+       line 256, "pan.___", state 1706, "(1)"
+       line 1303, "pan.___", state 1722, "-end-"
+       (103 of 1722 states)
+unreached in proctype :init:
+       (0 of 28 states)
+unreached in proctype :never:
+       line 1366, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 5.15e+04 seconds
+pan: rate 1721.7944 states/second
+pan: avg transition delay 1.5843e-06 usec
+cp .input.spin urcu_free.spin.input
+cp .input.spin.trail urcu_free.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free.ltl b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free.spin.input b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free.spin.input
new file mode 100644 (file)
index 0000000..ca70e6c
--- /dev/null
@@ -0,0 +1,1339 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
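+// Editor's note: the urcu_free.ltl claim verified in the log above,
+// "[] (!read_poison)", asserts that this predicate never becomes true,
+// i.e. no reader ever observes POISONed (freed) memory.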
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
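+/*
+ * Illustrative sketch (editor's note, not part of the verified model), with
+ * hypothetical tokens TOK_A = (1 << 0) and TOK_B = (1 << 1): a guard
+ *     CONSUME_TOKENS(state, TOK_A, TOK_B)
+ * lets its instruction execute only once TOK_A has been produced and as long
+ * as TOK_B has not; the instruction then typically ends with
+ *     PRODUCE_TOKENS(state, TOK_B)
+ * to unblock the instructions depending on it. CLEAR_TOKENS(state, bits)
+ * resets part of the one-hot word, e.g. between loop iterations.
+ */
+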
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can remove this dependency, but the dependency remains when writing
+ * multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
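+
+/*
+ * Minimal illustration of the dependency classes above (editor's sketch, with
+ * hypothetical variables a, b and tmp):
+ *
+ *   tmp = a;     // (1)
+ *   b = tmp;     // (2) RAW: (2) reads the value written by (1)
+ *   a = 0;       // (3) WAR: (3) writes a after (1) has read it
+ *   a = 1;       // (4) WAW: (4) overwrites the value written by (3)
+ *
+ * In the token encoding used in this model, (2), (3) and (4) would each
+ * CONSUME the token PRODUCEd by the instruction they depend on.
+ */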
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Randomly propagate cache entries to/from memory, or do nothing: updates may
+ * become visible to other processes at unpredictable times.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
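+/*
+ * Editor's sketch of the intended lifecycle of a cached variable (hypothetical
+ * variable name; the model's real declarations appear further below):
+ *
+ *   DECLARE_CACHED_VAR(byte, foo);        // memory copy mem_foo
+ *   DECLARE_PROC_CACHED_VAR(byte, foo);   // per-process cached_foo + dirty bit
+ *
+ *   WRITE_CACHED_VAR(foo, 1);             // updates cached_foo, marks it dirty
+ *   CACHE_WRITE_TO_MEM(foo, get_pid());   // flushes to mem_foo, clears dirty bit
+ *   CACHE_READ_FROM_MEM(foo, get_pid());  // refreshes cached_foo if not dirty
+ *
+ * ooo_mem() below uses the RANDOM_* variants of these operations to model
+ * cache write-backs (and, on Alpha, cache refills) happening at arbitrary
+ * points.
+ */
+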
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
+ * because they would add core synchronization that does not exist and would
+ * therefore create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (in different verification runs), we make a different instruction listen
+ * for signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * (waiting for the reader) while sending barrier requests, with
+                * the reader always servicing them without otherwise progressing.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
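+
+/*
+ * Editor's note on the handshake above (a sketch, not model code): the
+ * writer's smp_mb_send() sets reader_barrier[i] = 1 for each reader and
+ * busy-waits until that reader's smp_mb_recv() loop executes smp_mb() and
+ * clears the flag. The reader may instead break out and ignore the request;
+ * the progress_ignoring_* and PROGRESS_LABEL() labels mark the resulting
+ * busy-wait and ignore cycles so they are not reported as non-progress
+ * errors.
+ */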
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier can only execute at a point
+                * where the reader's execution appears in program order.
+                */
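+               /*
+                * Each alternative below lists the tokens that must already be
+                * produced (second argument) and those that must still be
+                * missing (third argument), i.e. one possible stage of reader
+                * progress, so smp_mb_recv() may execute between any two
+                * consecutive reader steps.
+                */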
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
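+               /*
+                * Token-driven execution of the reader body : each alternative
+                * below may fire once its dependency tokens have been produced
+                * and its own token has not, which lets the steps interleave in
+                * any order consistent with the declared dependencies.
+                */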
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because avoiding it would require a branch, and the performance
+                        * impact of that branch in the common case does not justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
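+
+/*
+ * As for the reader, WRITE_PROC_ALL_TOKENS lists only the completion tokens :
+ * the WRITE_PROC_*_WAIT_LOOP branch tokens are excluded, while the clear mask
+ * covers every bit up to and including WRITE_FREE.
+ */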
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
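+
+/*
+ * The writer models an update followed by synchronize_rcu() and reclamation :
+ * write the new data entry, wmb, exchange rcu_ptr, then flip the grace-period
+ * parity and wait for reader 0, twice, and finally poison the previously
+ * published data entry (WRITE_FREE).
+ */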
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
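+               /*
+                * With SINGLE_FLIP, the second flip is modeled as already
+                * done : its tokens are produced up front so only one parity
+                * flip executes per loop, and cur_gp_val tracks the parity the
+                * wait condition must test against.
+                */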
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
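+                       /*
+                        * Loop (WAIT_LOOP) while reader 0 is in a read-side
+                        * critical section (nesting count non-zero) whose
+                        * parity bit differs from cur_gp_val; otherwise the
+                        * wait completes (WAIT).
+                        */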
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second
+                        * read, which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with a progress label here so that, with weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_nested.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.log b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.log
new file mode 100644 (file)
index 0000000..fbb91fe
--- /dev/null
@@ -0,0 +1,824 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_mb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+Depth=    9193 States=    1e+06 Transitions= 2.02e+08 Memory=   513.615        t=    308 R=   3e+03
+Depth=    9193 States=    2e+06 Transitions= 4.02e+08 Memory=   560.100        t=    617 R=   3e+03
+pan: claim violated! (at depth 1482)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 9193, errors: 1
+  2638623 states, stored
+5.0906949e+08 states, matched
+5.1170812e+08 transitions (= stored+matched)
+2.7495835e+09 atomic steps
+hash conflicts: 3.3187407e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  291.901      equivalent memory usage for states (stored*(State-vector + overhead))
+  123.793      actual memory usage for states (compression: 42.41%)
+               state-vector as stored = 13 byte + 36 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  589.494      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 39337 2870 221 2 2 ]
+unreached in proctype urcu_reader
+       line 894, "pan.___", state 12, "((i<1))"
+       line 894, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 61, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 83, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 92, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 108, "(1)"
+       line 249, "pan.___", state 116, "(1)"
+       line 253, "pan.___", state 128, "(1)"
+       line 257, "pan.___", state 136, "(1)"
+       line 407, "pan.___", state 162, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 194, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 208, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 227, "(1)"
+       line 434, "pan.___", state 257, "(1)"
+       line 438, "pan.___", state 270, "(1)"
+       line 696, "pan.___", state 291, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 298, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 330, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 344, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 363, "(1)"
+       line 434, "pan.___", state 393, "(1)"
+       line 438, "pan.___", state 406, "(1)"
+       line 407, "pan.___", state 427, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 459, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 473, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 492, "(1)"
+       line 434, "pan.___", state 522, "(1)"
+       line 438, "pan.___", state 535, "(1)"
+       line 407, "pan.___", state 558, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 560, "(1)"
+       line 407, "pan.___", state 561, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 561, "else"
+       line 407, "pan.___", state 564, "(1)"
+       line 411, "pan.___", state 572, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 574, "(1)"
+       line 411, "pan.___", state 575, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 575, "else"
+       line 411, "pan.___", state 578, "(1)"
+       line 411, "pan.___", state 579, "(1)"
+       line 411, "pan.___", state 579, "(1)"
+       line 409, "pan.___", state 584, "((i<1))"
+       line 409, "pan.___", state 584, "((i>=1))"
+       line 416, "pan.___", state 590, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 592, "(1)"
+       line 416, "pan.___", state 593, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 593, "else"
+       line 416, "pan.___", state 596, "(1)"
+       line 416, "pan.___", state 597, "(1)"
+       line 416, "pan.___", state 597, "(1)"
+       line 420, "pan.___", state 604, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 606, "(1)"
+       line 420, "pan.___", state 607, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 607, "else"
+       line 420, "pan.___", state 610, "(1)"
+       line 420, "pan.___", state 611, "(1)"
+       line 420, "pan.___", state 611, "(1)"
+       line 418, "pan.___", state 616, "((i<2))"
+       line 418, "pan.___", state 616, "((i>=2))"
+       line 425, "pan.___", state 623, "(1)"
+       line 425, "pan.___", state 624, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 624, "else"
+       line 425, "pan.___", state 627, "(1)"
+       line 425, "pan.___", state 628, "(1)"
+       line 425, "pan.___", state 628, "(1)"
+       line 429, "pan.___", state 636, "(1)"
+       line 429, "pan.___", state 637, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 637, "else"
+       line 429, "pan.___", state 640, "(1)"
+       line 429, "pan.___", state 641, "(1)"
+       line 429, "pan.___", state 641, "(1)"
+       line 427, "pan.___", state 646, "((i<1))"
+       line 427, "pan.___", state 646, "((i>=1))"
+       line 434, "pan.___", state 653, "(1)"
+       line 434, "pan.___", state 654, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 654, "else"
+       line 434, "pan.___", state 657, "(1)"
+       line 434, "pan.___", state 658, "(1)"
+       line 434, "pan.___", state 658, "(1)"
+       line 438, "pan.___", state 666, "(1)"
+       line 438, "pan.___", state 667, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 667, "else"
+       line 438, "pan.___", state 670, "(1)"
+       line 438, "pan.___", state 671, "(1)"
+       line 438, "pan.___", state 671, "(1)"
+       line 436, "pan.___", state 676, "((i<2))"
+       line 436, "pan.___", state 676, "((i>=2))"
+       line 446, "pan.___", state 680, "(1)"
+       line 446, "pan.___", state 680, "(1)"
+       line 696, "pan.___", state 683, "cached_urcu_active_readers = (tmp+1)"
+       line 696, "pan.___", state 684, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 696, "pan.___", state 685, "(1)"
+       line 407, "pan.___", state 692, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 724, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 738, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 757, "(1)"
+       line 434, "pan.___", state 787, "(1)"
+       line 438, "pan.___", state 800, "(1)"
+       line 407, "pan.___", state 828, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 860, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 874, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 893, "(1)"
+       line 434, "pan.___", state 923, "(1)"
+       line 438, "pan.___", state 936, "(1)"
+       line 407, "pan.___", state 957, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 959, "(1)"
+       line 407, "pan.___", state 960, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 960, "else"
+       line 407, "pan.___", state 963, "(1)"
+       line 411, "pan.___", state 971, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 973, "(1)"
+       line 411, "pan.___", state 974, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 974, "else"
+       line 411, "pan.___", state 977, "(1)"
+       line 411, "pan.___", state 978, "(1)"
+       line 411, "pan.___", state 978, "(1)"
+       line 409, "pan.___", state 983, "((i<1))"
+       line 409, "pan.___", state 983, "((i>=1))"
+       line 416, "pan.___", state 989, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 991, "(1)"
+       line 416, "pan.___", state 992, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 992, "else"
+       line 416, "pan.___", state 995, "(1)"
+       line 416, "pan.___", state 996, "(1)"
+       line 416, "pan.___", state 996, "(1)"
+       line 420, "pan.___", state 1003, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 1005, "(1)"
+       line 420, "pan.___", state 1006, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 1006, "else"
+       line 420, "pan.___", state 1009, "(1)"
+       line 420, "pan.___", state 1010, "(1)"
+       line 420, "pan.___", state 1010, "(1)"
+       line 418, "pan.___", state 1015, "((i<2))"
+       line 418, "pan.___", state 1015, "((i>=2))"
+       line 425, "pan.___", state 1022, "(1)"
+       line 425, "pan.___", state 1023, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 1023, "else"
+       line 425, "pan.___", state 1026, "(1)"
+       line 425, "pan.___", state 1027, "(1)"
+       line 425, "pan.___", state 1027, "(1)"
+       line 429, "pan.___", state 1035, "(1)"
+       line 429, "pan.___", state 1036, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 1036, "else"
+       line 429, "pan.___", state 1039, "(1)"
+       line 429, "pan.___", state 1040, "(1)"
+       line 429, "pan.___", state 1040, "(1)"
+       line 427, "pan.___", state 1045, "((i<1))"
+       line 427, "pan.___", state 1045, "((i>=1))"
+       line 434, "pan.___", state 1052, "(1)"
+       line 434, "pan.___", state 1053, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 1053, "else"
+       line 434, "pan.___", state 1056, "(1)"
+       line 434, "pan.___", state 1057, "(1)"
+       line 434, "pan.___", state 1057, "(1)"
+       line 438, "pan.___", state 1065, "(1)"
+       line 438, "pan.___", state 1066, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 1066, "else"
+       line 438, "pan.___", state 1069, "(1)"
+       line 438, "pan.___", state 1070, "(1)"
+       line 438, "pan.___", state 1070, "(1)"
+       line 436, "pan.___", state 1075, "((i<2))"
+       line 436, "pan.___", state 1075, "((i>=2))"
+       line 446, "pan.___", state 1079, "(1)"
+       line 446, "pan.___", state 1079, "(1)"
+       line 704, "pan.___", state 1083, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 1088, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1120, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1134, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1153, "(1)"
+       line 434, "pan.___", state 1183, "(1)"
+       line 438, "pan.___", state 1196, "(1)"
+       line 407, "pan.___", state 1220, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1252, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1285, "(1)"
+       line 434, "pan.___", state 1315, "(1)"
+       line 438, "pan.___", state 1328, "(1)"
+       line 407, "pan.___", state 1353, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1385, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1399, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1418, "(1)"
+       line 434, "pan.___", state 1448, "(1)"
+       line 438, "pan.___", state 1461, "(1)"
+       line 407, "pan.___", state 1482, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1514, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1528, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1547, "(1)"
+       line 434, "pan.___", state 1577, "(1)"
+       line 438, "pan.___", state 1590, "(1)"
+       line 407, "pan.___", state 1616, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1648, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1681, "(1)"
+       line 434, "pan.___", state 1711, "(1)"
+       line 438, "pan.___", state 1724, "(1)"
+       line 407, "pan.___", state 1745, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1777, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1791, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1810, "(1)"
+       line 434, "pan.___", state 1840, "(1)"
+       line 438, "pan.___", state 1853, "(1)"
+       line 407, "pan.___", state 1877, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1909, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1923, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1942, "(1)"
+       line 434, "pan.___", state 1972, "(1)"
+       line 438, "pan.___", state 1985, "(1)"
+       line 743, "pan.___", state 2006, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 2013, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2045, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2059, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2078, "(1)"
+       line 434, "pan.___", state 2108, "(1)"
+       line 438, "pan.___", state 2121, "(1)"
+       line 407, "pan.___", state 2142, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2174, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2188, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2207, "(1)"
+       line 434, "pan.___", state 2237, "(1)"
+       line 438, "pan.___", state 2250, "(1)"
+       line 407, "pan.___", state 2273, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 2275, "(1)"
+       line 407, "pan.___", state 2276, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 2276, "else"
+       line 407, "pan.___", state 2279, "(1)"
+       line 411, "pan.___", state 2287, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 2289, "(1)"
+       line 411, "pan.___", state 2290, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 2290, "else"
+       line 411, "pan.___", state 2293, "(1)"
+       line 411, "pan.___", state 2294, "(1)"
+       line 411, "pan.___", state 2294, "(1)"
+       line 409, "pan.___", state 2299, "((i<1))"
+       line 409, "pan.___", state 2299, "((i>=1))"
+       line 416, "pan.___", state 2305, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2307, "(1)"
+       line 416, "pan.___", state 2308, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2308, "else"
+       line 416, "pan.___", state 2311, "(1)"
+       line 416, "pan.___", state 2312, "(1)"
+       line 416, "pan.___", state 2312, "(1)"
+       line 420, "pan.___", state 2319, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2321, "(1)"
+       line 420, "pan.___", state 2322, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2322, "else"
+       line 420, "pan.___", state 2325, "(1)"
+       line 420, "pan.___", state 2326, "(1)"
+       line 420, "pan.___", state 2326, "(1)"
+       line 418, "pan.___", state 2331, "((i<2))"
+       line 418, "pan.___", state 2331, "((i>=2))"
+       line 425, "pan.___", state 2338, "(1)"
+       line 425, "pan.___", state 2339, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 2339, "else"
+       line 425, "pan.___", state 2342, "(1)"
+       line 425, "pan.___", state 2343, "(1)"
+       line 425, "pan.___", state 2343, "(1)"
+       line 429, "pan.___", state 2351, "(1)"
+       line 429, "pan.___", state 2352, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 2352, "else"
+       line 429, "pan.___", state 2355, "(1)"
+       line 429, "pan.___", state 2356, "(1)"
+       line 429, "pan.___", state 2356, "(1)"
+       line 427, "pan.___", state 2361, "((i<1))"
+       line 427, "pan.___", state 2361, "((i>=1))"
+       line 434, "pan.___", state 2368, "(1)"
+       line 434, "pan.___", state 2369, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 2369, "else"
+       line 434, "pan.___", state 2372, "(1)"
+       line 434, "pan.___", state 2373, "(1)"
+       line 434, "pan.___", state 2373, "(1)"
+       line 438, "pan.___", state 2381, "(1)"
+       line 438, "pan.___", state 2382, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 2382, "else"
+       line 438, "pan.___", state 2385, "(1)"
+       line 438, "pan.___", state 2386, "(1)"
+       line 438, "pan.___", state 2386, "(1)"
+       line 436, "pan.___", state 2391, "((i<2))"
+       line 436, "pan.___", state 2391, "((i>=2))"
+       line 446, "pan.___", state 2395, "(1)"
+       line 446, "pan.___", state 2395, "(1)"
+       line 743, "pan.___", state 2398, "cached_urcu_active_readers = (tmp+1)"
+       line 743, "pan.___", state 2399, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 743, "pan.___", state 2400, "(1)"
+       line 407, "pan.___", state 2407, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2439, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2453, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2472, "(1)"
+       line 434, "pan.___", state 2502, "(1)"
+       line 438, "pan.___", state 2515, "(1)"
+       line 407, "pan.___", state 2542, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2574, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2588, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2607, "(1)"
+       line 434, "pan.___", state 2637, "(1)"
+       line 438, "pan.___", state 2650, "(1)"
+       line 407, "pan.___", state 2671, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2703, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2717, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2736, "(1)"
+       line 434, "pan.___", state 2766, "(1)"
+       line 438, "pan.___", state 2779, "(1)"
+       line 245, "pan.___", state 2812, "(1)"
+       line 253, "pan.___", state 2832, "(1)"
+       line 257, "pan.___", state 2840, "(1)"
+       line 245, "pan.___", state 2855, "(1)"
+       line 253, "pan.___", state 2875, "(1)"
+       line 257, "pan.___", state 2883, "(1)"
+       line 929, "pan.___", state 2900, "-end-"
+       (246 of 2900 states)
+unreached in proctype urcu_writer
+       line 1018, "pan.___", state 12, "((i<1))"
+       line 1018, "pan.___", state 12, "((i>=1))"
+       line 407, "pan.___", state 47, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 53, "(1)"
+       line 411, "pan.___", state 61, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 67, "(1)"
+       line 411, "pan.___", state 68, "(1)"
+       line 411, "pan.___", state 68, "(1)"
+       line 409, "pan.___", state 73, "((i<1))"
+       line 409, "pan.___", state 73, "((i>=1))"
+       line 416, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 85, "(1)"
+       line 416, "pan.___", state 86, "(1)"
+       line 416, "pan.___", state 86, "(1)"
+       line 420, "pan.___", state 99, "(1)"
+       line 420, "pan.___", state 100, "(1)"
+       line 420, "pan.___", state 100, "(1)"
+       line 418, "pan.___", state 105, "((i<2))"
+       line 418, "pan.___", state 105, "((i>=2))"
+       line 425, "pan.___", state 112, "(1)"
+       line 425, "pan.___", state 113, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 113, "else"
+       line 425, "pan.___", state 116, "(1)"
+       line 425, "pan.___", state 117, "(1)"
+       line 425, "pan.___", state 117, "(1)"
+       line 429, "pan.___", state 125, "(1)"
+       line 429, "pan.___", state 126, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 126, "else"
+       line 429, "pan.___", state 129, "(1)"
+       line 429, "pan.___", state 130, "(1)"
+       line 429, "pan.___", state 130, "(1)"
+       line 427, "pan.___", state 135, "((i<1))"
+       line 427, "pan.___", state 135, "((i>=1))"
+       line 434, "pan.___", state 142, "(1)"
+       line 434, "pan.___", state 143, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 143, "else"
+       line 434, "pan.___", state 146, "(1)"
+       line 434, "pan.___", state 147, "(1)"
+       line 434, "pan.___", state 147, "(1)"
+       line 438, "pan.___", state 155, "(1)"
+       line 438, "pan.___", state 156, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 156, "else"
+       line 438, "pan.___", state 159, "(1)"
+       line 438, "pan.___", state 160, "(1)"
+       line 438, "pan.___", state 160, "(1)"
+       line 436, "pan.___", state 165, "((i<2))"
+       line 436, "pan.___", state 165, "((i>=2))"
+       line 446, "pan.___", state 169, "(1)"
+       line 446, "pan.___", state 169, "(1)"
+       line 268, "pan.___", state 178, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 187, "cache_dirty_urcu_active_readers = 0"
+       line 270, "pan.___", state 195, "((i<1))"
+       line 270, "pan.___", state 195, "((i>=1))"
+       line 276, "pan.___", state 200, "cache_dirty_rcu_ptr = 0"
+       line 1088, "pan.___", state 228, "old_data = cached_rcu_ptr"
+       line 1099, "pan.___", state 232, "_proc_urcu_writer = (_proc_urcu_writer|(1<<4))"
+       line 407, "pan.___", state 240, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 246, "(1)"
+       line 411, "pan.___", state 254, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 260, "(1)"
+       line 411, "pan.___", state 261, "(1)"
+       line 411, "pan.___", state 261, "(1)"
+       line 409, "pan.___", state 266, "((i<1))"
+       line 409, "pan.___", state 266, "((i>=1))"
+       line 416, "pan.___", state 274, "(1)"
+       line 416, "pan.___", state 275, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 275, "else"
+       line 416, "pan.___", state 278, "(1)"
+       line 416, "pan.___", state 279, "(1)"
+       line 416, "pan.___", state 279, "(1)"
+       line 420, "pan.___", state 286, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 292, "(1)"
+       line 420, "pan.___", state 293, "(1)"
+       line 420, "pan.___", state 293, "(1)"
+       line 418, "pan.___", state 298, "((i<2))"
+       line 418, "pan.___", state 298, "((i>=2))"
+       line 425, "pan.___", state 305, "(1)"
+       line 425, "pan.___", state 306, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 306, "else"
+       line 425, "pan.___", state 309, "(1)"
+       line 425, "pan.___", state 310, "(1)"
+       line 425, "pan.___", state 310, "(1)"
+       line 429, "pan.___", state 318, "(1)"
+       line 429, "pan.___", state 319, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 319, "else"
+       line 429, "pan.___", state 322, "(1)"
+       line 429, "pan.___", state 323, "(1)"
+       line 429, "pan.___", state 323, "(1)"
+       line 427, "pan.___", state 328, "((i<1))"
+       line 427, "pan.___", state 328, "((i>=1))"
+       line 434, "pan.___", state 335, "(1)"
+       line 434, "pan.___", state 336, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 336, "else"
+       line 434, "pan.___", state 339, "(1)"
+       line 434, "pan.___", state 340, "(1)"
+       line 434, "pan.___", state 340, "(1)"
+       line 438, "pan.___", state 348, "(1)"
+       line 438, "pan.___", state 349, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 349, "else"
+       line 438, "pan.___", state 352, "(1)"
+       line 438, "pan.___", state 353, "(1)"
+       line 438, "pan.___", state 353, "(1)"
+       line 436, "pan.___", state 358, "((i<2))"
+       line 436, "pan.___", state 358, "((i>=2))"
+       line 446, "pan.___", state 362, "(1)"
+       line 446, "pan.___", state 362, "(1)"
+       line 407, "pan.___", state 373, "(1)"
+       line 407, "pan.___", state 374, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 374, "else"
+       line 407, "pan.___", state 377, "(1)"
+       line 411, "pan.___", state 385, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 391, "(1)"
+       line 411, "pan.___", state 392, "(1)"
+       line 411, "pan.___", state 392, "(1)"
+       line 409, "pan.___", state 397, "((i<1))"
+       line 409, "pan.___", state 397, "((i>=1))"
+       line 416, "pan.___", state 403, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 409, "(1)"
+       line 416, "pan.___", state 410, "(1)"
+       line 416, "pan.___", state 410, "(1)"
+       line 420, "pan.___", state 417, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 423, "(1)"
+       line 420, "pan.___", state 424, "(1)"
+       line 420, "pan.___", state 424, "(1)"
+       line 418, "pan.___", state 429, "((i<2))"
+       line 418, "pan.___", state 429, "((i>=2))"
+       line 425, "pan.___", state 436, "(1)"
+       line 425, "pan.___", state 437, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 437, "else"
+       line 425, "pan.___", state 440, "(1)"
+       line 425, "pan.___", state 441, "(1)"
+       line 425, "pan.___", state 441, "(1)"
+       line 429, "pan.___", state 449, "(1)"
+       line 429, "pan.___", state 450, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 450, "else"
+       line 429, "pan.___", state 453, "(1)"
+       line 429, "pan.___", state 454, "(1)"
+       line 429, "pan.___", state 454, "(1)"
+       line 427, "pan.___", state 459, "((i<1))"
+       line 427, "pan.___", state 459, "((i>=1))"
+       line 434, "pan.___", state 466, "(1)"
+       line 434, "pan.___", state 467, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 467, "else"
+       line 434, "pan.___", state 470, "(1)"
+       line 434, "pan.___", state 471, "(1)"
+       line 434, "pan.___", state 471, "(1)"
+       line 438, "pan.___", state 479, "(1)"
+       line 438, "pan.___", state 480, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 480, "else"
+       line 438, "pan.___", state 483, "(1)"
+       line 438, "pan.___", state 484, "(1)"
+       line 438, "pan.___", state 484, "(1)"
+       line 436, "pan.___", state 489, "((i<2))"
+       line 436, "pan.___", state 489, "((i>=2))"
+       line 446, "pan.___", state 493, "(1)"
+       line 446, "pan.___", state 493, "(1)"
+       line 1153, "pan.___", state 504, "_proc_urcu_writer = (_proc_urcu_writer&~((1<<9)))"
+       line 1158, "pan.___", state 505, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<8)|(1<<7))))"
+       line 407, "pan.___", state 510, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 516, "(1)"
+       line 411, "pan.___", state 524, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 530, "(1)"
+       line 411, "pan.___", state 531, "(1)"
+       line 411, "pan.___", state 531, "(1)"
+       line 409, "pan.___", state 536, "((i<1))"
+       line 409, "pan.___", state 536, "((i>=1))"
+       line 416, "pan.___", state 542, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 548, "(1)"
+       line 416, "pan.___", state 549, "(1)"
+       line 416, "pan.___", state 549, "(1)"
+       line 420, "pan.___", state 556, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 562, "(1)"
+       line 420, "pan.___", state 563, "(1)"
+       line 420, "pan.___", state 563, "(1)"
+       line 418, "pan.___", state 568, "((i<2))"
+       line 418, "pan.___", state 568, "((i>=2))"
+       line 425, "pan.___", state 575, "(1)"
+       line 425, "pan.___", state 576, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 576, "else"
+       line 425, "pan.___", state 579, "(1)"
+       line 425, "pan.___", state 580, "(1)"
+       line 425, "pan.___", state 580, "(1)"
+       line 429, "pan.___", state 588, "(1)"
+       line 429, "pan.___", state 589, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 589, "else"
+       line 429, "pan.___", state 592, "(1)"
+       line 429, "pan.___", state 593, "(1)"
+       line 429, "pan.___", state 593, "(1)"
+       line 427, "pan.___", state 598, "((i<1))"
+       line 427, "pan.___", state 598, "((i>=1))"
+       line 434, "pan.___", state 605, "(1)"
+       line 434, "pan.___", state 606, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 606, "else"
+       line 434, "pan.___", state 609, "(1)"
+       line 434, "pan.___", state 610, "(1)"
+       line 434, "pan.___", state 610, "(1)"
+       line 438, "pan.___", state 618, "(1)"
+       line 438, "pan.___", state 619, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 619, "else"
+       line 438, "pan.___", state 622, "(1)"
+       line 438, "pan.___", state 623, "(1)"
+       line 438, "pan.___", state 623, "(1)"
+       line 446, "pan.___", state 632, "(1)"
+       line 446, "pan.___", state 632, "(1)"
+       line 407, "pan.___", state 639, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 653, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 671, "cache_dirty_rcu_ptr = 0"
+       line 425, "pan.___", state 704, "(1)"
+       line 429, "pan.___", state 717, "(1)"
+       line 434, "pan.___", state 734, "(1)"
+       line 438, "pan.___", state 747, "(1)"
+       line 411, "pan.___", state 784, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 802, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 816, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 848, "(1)"
+       line 434, "pan.___", state 865, "(1)"
+       line 438, "pan.___", state 878, "(1)"
+       line 1235, "pan.___", state 905, "_proc_urcu_writer = (_proc_urcu_writer|(1<<13))"
+       line 268, "pan.___", state 933, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 935, "(1)"
+       line 272, "pan.___", state 942, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 944, "(1)"
+       line 272, "pan.___", state 945, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 945, "else"
+       line 270, "pan.___", state 950, "((i<1))"
+       line 270, "pan.___", state 950, "((i>=1))"
+       line 276, "pan.___", state 955, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 957, "(1)"
+       line 276, "pan.___", state 958, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 958, "else"
+       line 280, "pan.___", state 964, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 966, "(1)"
+       line 280, "pan.___", state 967, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 967, "else"
+       line 278, "pan.___", state 972, "((i<2))"
+       line 278, "pan.___", state 972, "((i>=2))"
+       line 245, "pan.___", state 980, "(1)"
+       line 249, "pan.___", state 988, "(1)"
+       line 249, "pan.___", state 989, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 989, "else"
+       line 247, "pan.___", state 994, "((i<1))"
+       line 247, "pan.___", state 994, "((i>=1))"
+       line 253, "pan.___", state 1000, "(1)"
+       line 253, "pan.___", state 1001, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1001, "else"
+       line 257, "pan.___", state 1008, "(1)"
+       line 257, "pan.___", state 1009, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1009, "else"
+       line 262, "pan.___", state 1018, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1018, "else"
+       line 1289, "pan.___", state 1034, "((i<1))"
+       line 1289, "pan.___", state 1034, "((i>=1))"
+       line 268, "pan.___", state 1039, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1041, "(1)"
+       line 272, "pan.___", state 1048, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1050, "(1)"
+       line 272, "pan.___", state 1051, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1051, "else"
+       line 270, "pan.___", state 1056, "((i<1))"
+       line 270, "pan.___", state 1056, "((i>=1))"
+       line 276, "pan.___", state 1061, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1063, "(1)"
+       line 276, "pan.___", state 1064, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1064, "else"
+       line 280, "pan.___", state 1070, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1072, "(1)"
+       line 280, "pan.___", state 1073, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1073, "else"
+       line 278, "pan.___", state 1078, "((i<2))"
+       line 278, "pan.___", state 1078, "((i>=2))"
+       line 245, "pan.___", state 1086, "(1)"
+       line 249, "pan.___", state 1094, "(1)"
+       line 249, "pan.___", state 1095, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1095, "else"
+       line 247, "pan.___", state 1100, "((i<1))"
+       line 247, "pan.___", state 1100, "((i>=1))"
+       line 253, "pan.___", state 1106, "(1)"
+       line 253, "pan.___", state 1107, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1107, "else"
+       line 257, "pan.___", state 1114, "(1)"
+       line 257, "pan.___", state 1115, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1115, "else"
+       line 262, "pan.___", state 1124, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1124, "else"
+       line 295, "pan.___", state 1126, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1126, "else"
+       line 1289, "pan.___", state 1127, "(cache_dirty_urcu_gp_ctr)"
+       line 1289, "pan.___", state 1127, "else"
+       line 268, "pan.___", state 1131, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1133, "(1)"
+       line 272, "pan.___", state 1140, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1142, "(1)"
+       line 272, "pan.___", state 1143, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1143, "else"
+       line 270, "pan.___", state 1148, "((i<1))"
+       line 270, "pan.___", state 1148, "((i>=1))"
+       line 276, "pan.___", state 1153, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1155, "(1)"
+       line 276, "pan.___", state 1156, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1156, "else"
+       line 280, "pan.___", state 1162, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1164, "(1)"
+       line 280, "pan.___", state 1165, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1165, "else"
+       line 278, "pan.___", state 1170, "((i<2))"
+       line 278, "pan.___", state 1170, "((i>=2))"
+       line 245, "pan.___", state 1178, "(1)"
+       line 249, "pan.___", state 1186, "(1)"
+       line 249, "pan.___", state 1187, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1187, "else"
+       line 247, "pan.___", state 1192, "((i<1))"
+       line 247, "pan.___", state 1192, "((i>=1))"
+       line 253, "pan.___", state 1198, "(1)"
+       line 253, "pan.___", state 1199, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1199, "else"
+       line 257, "pan.___", state 1206, "(1)"
+       line 257, "pan.___", state 1207, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1207, "else"
+       line 262, "pan.___", state 1216, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1216, "else"
+       line 1293, "pan.___", state 1219, "i = 0"
+       line 1293, "pan.___", state 1221, "reader_barrier = 1"
+       line 1293, "pan.___", state 1232, "((i<1))"
+       line 1293, "pan.___", state 1232, "((i>=1))"
+       line 268, "pan.___", state 1237, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1239, "(1)"
+       line 272, "pan.___", state 1246, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1248, "(1)"
+       line 272, "pan.___", state 1249, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1249, "else"
+       line 270, "pan.___", state 1254, "((i<1))"
+       line 270, "pan.___", state 1254, "((i>=1))"
+       line 276, "pan.___", state 1259, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1261, "(1)"
+       line 276, "pan.___", state 1262, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1262, "else"
+       line 280, "pan.___", state 1268, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1270, "(1)"
+       line 280, "pan.___", state 1271, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1271, "else"
+       line 278, "pan.___", state 1276, "((i<2))"
+       line 278, "pan.___", state 1276, "((i>=2))"
+       line 245, "pan.___", state 1284, "(1)"
+       line 249, "pan.___", state 1292, "(1)"
+       line 249, "pan.___", state 1293, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1293, "else"
+       line 247, "pan.___", state 1298, "((i<1))"
+       line 247, "pan.___", state 1298, "((i>=1))"
+       line 253, "pan.___", state 1304, "(1)"
+       line 253, "pan.___", state 1305, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1305, "else"
+       line 257, "pan.___", state 1312, "(1)"
+       line 257, "pan.___", state 1313, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1313, "else"
+       line 262, "pan.___", state 1322, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1322, "else"
+       line 295, "pan.___", state 1324, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1324, "else"
+       line 1293, "pan.___", state 1325, "(cache_dirty_urcu_gp_ctr)"
+       line 1293, "pan.___", state 1325, "else"
+       line 272, "pan.___", state 1338, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1351, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1360, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1376, "(1)"
+       line 249, "pan.___", state 1384, "(1)"
+       line 253, "pan.___", state 1396, "(1)"
+       line 257, "pan.___", state 1404, "(1)"
+       line 268, "pan.___", state 1435, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1444, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1457, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1466, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1482, "(1)"
+       line 249, "pan.___", state 1490, "(1)"
+       line 253, "pan.___", state 1502, "(1)"
+       line 257, "pan.___", state 1510, "(1)"
+       line 268, "pan.___", state 1527, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1529, "(1)"
+       line 272, "pan.___", state 1536, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1538, "(1)"
+       line 272, "pan.___", state 1539, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1539, "else"
+       line 270, "pan.___", state 1544, "((i<1))"
+       line 270, "pan.___", state 1544, "((i>=1))"
+       line 276, "pan.___", state 1549, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1551, "(1)"
+       line 276, "pan.___", state 1552, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1552, "else"
+       line 280, "pan.___", state 1558, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1560, "(1)"
+       line 280, "pan.___", state 1561, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1561, "else"
+       line 278, "pan.___", state 1566, "((i<2))"
+       line 278, "pan.___", state 1566, "((i>=2))"
+       line 245, "pan.___", state 1574, "(1)"
+       line 249, "pan.___", state 1582, "(1)"
+       line 249, "pan.___", state 1583, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1583, "else"
+       line 247, "pan.___", state 1588, "((i<1))"
+       line 247, "pan.___", state 1588, "((i>=1))"
+       line 253, "pan.___", state 1594, "(1)"
+       line 253, "pan.___", state 1595, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1595, "else"
+       line 257, "pan.___", state 1602, "(1)"
+       line 257, "pan.___", state 1603, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1603, "else"
+       line 262, "pan.___", state 1612, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1612, "else"
+       line 1300, "pan.___", state 1615, "i = 0"
+       line 1300, "pan.___", state 1617, "reader_barrier = 1"
+       line 1300, "pan.___", state 1628, "((i<1))"
+       line 1300, "pan.___", state 1628, "((i>=1))"
+       line 268, "pan.___", state 1633, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1635, "(1)"
+       line 272, "pan.___", state 1642, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1644, "(1)"
+       line 272, "pan.___", state 1645, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1645, "else"
+       line 270, "pan.___", state 1650, "((i<1))"
+       line 270, "pan.___", state 1650, "((i>=1))"
+       line 276, "pan.___", state 1655, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1657, "(1)"
+       line 276, "pan.___", state 1658, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1658, "else"
+       line 280, "pan.___", state 1664, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1666, "(1)"
+       line 280, "pan.___", state 1667, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1667, "else"
+       line 278, "pan.___", state 1672, "((i<2))"
+       line 278, "pan.___", state 1672, "((i>=2))"
+       line 245, "pan.___", state 1680, "(1)"
+       line 249, "pan.___", state 1688, "(1)"
+       line 249, "pan.___", state 1689, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1689, "else"
+       line 247, "pan.___", state 1694, "((i<1))"
+       line 247, "pan.___", state 1694, "((i>=1))"
+       line 253, "pan.___", state 1700, "(1)"
+       line 253, "pan.___", state 1701, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1701, "else"
+       line 257, "pan.___", state 1708, "(1)"
+       line 257, "pan.___", state 1709, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1709, "else"
+       line 262, "pan.___", state 1718, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1718, "else"
+       line 295, "pan.___", state 1720, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1720, "else"
+       line 1300, "pan.___", state 1721, "(cache_dirty_urcu_gp_ctr)"
+       line 1300, "pan.___", state 1721, "else"
+       line 1304, "pan.___", state 1724, "-end-"
+       (312 of 1724 states)
+unreached in proctype :init:
+       line 1319, "pan.___", state 13, "((i<1))"
+       line 1319, "pan.___", state 13, "((i>=1))"
+       (1 of 28 states)
+unreached in proctype :never:
+       line 1367, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 792 seconds
+pan: rate 3330.8797 states/second
+pan: avg transition delay 1.5481e-06 usec
+cp .input.spin urcu_free_no_mb.spin.input
+cp .input.spin.trail urcu_free_no_mb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.spin.input b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.spin.input
new file mode 100644 (file)
index 0000000..b35bf7b
--- /dev/null
@@ -0,0 +1,1340 @@
+#define NO_MB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
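+
+/*
+ * Example (excerpt from the reader model below) : each instruction is written
+ * as a guard/action pair. The guard fires only once the tokens produced by the
+ * instructions it depends on are available (and its own token is not yet set),
+ * and the action publishes its own token once executed :
+ *
+ *  :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB, READ_PROC_READ_GEN) ->
+ *          ooo_mem(i);
+ *          ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+ *          PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+ */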
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually make this dependency go away, but the dependency remains when
+ * writing multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
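+
+/*
+ * Sketch of intended use (the actual declarations and accesses appear further
+ * below) : a variable gets a main-memory copy and a per-process cached copy;
+ * processes only touch the cached copy, and the cache macros above move data
+ * to and from main memory :
+ *
+ *  DECLARE_CACHED_VAR(byte, urcu_gp_ctr);       main memory copy
+ *  DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);  per-process cached copy
+ *  tmp2 = READ_CACHED_VAR(urcu_gp_ctr);         read the cached copy
+ *  WRITE_CACHED_VAR(urcu_gp_ctr, tmp2);         write it and mark it dirty
+ *  CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());  flush to main memory if dirty
+ */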
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
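+
+/*
+ * Summary of the handshake modeled above : smp_mb_send() executes the writer's
+ * own smp_mb(), raises reader_barrier[i] for each reader and busy-waits until
+ * the reader clears it from smp_mb_recv() (which issues the reader-side
+ * smp_mb()), then executes a final smp_mb(). The reader is also allowed to
+ * ignore pending requests and proceed, hence the progress labels marking the
+ * cycles we deliberately ignore.
+ */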
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
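+
+/*
+ * The macro above models rcu_read_lock() : read the per-reader nesting count,
+ * and if no nesting bit is set, snapshot the global urcu_gp_ctr into
+ * urcu_active_readers (outermost lock); otherwise simply increment the nesting
+ * count. Both branches merge afterwards (post-dominance).
+ */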
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
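+
+/*
+ * The macro above models rcu_read_unlock() : re-read the per-reader nesting
+ * count and write it back decremented by one.
+ */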
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch to skip it
+                        * in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
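+
+/*
+ * The token bits above encode the dependency graph of one writer iteration,
+ * consumed and produced by the branches below. In program order: write the
+ * new data entry, wmb(), exchange the RCU pointer, first mb(), flip the
+ * counter parity and wait for readers of the old parity (twice), second
+ * mb(), then poison (free) the old data entry. The CONSUME_TOKENS guards
+ * below encode which of these steps may be reordered with each other.
+ */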
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
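+                       /*
+                        * Keep waiting (WRITE_PROC_FIRST_WAIT_LOOP) as long as
+                        * reader 0 is within a read-side critical section
+                        * (nesting count non-zero) whose urcu_gp_ctr snapshot
+                        * has a parity bit different from cur_gp_val, i.e., in
+                        * the normal two-flip case, a reader that started
+                        * before the counter flip. Otherwise the wait
+                        * completes (WRITE_PROC_FIRST_WAIT).
+                        */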
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, which was performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that our validation
+                * checks whether the data entry that was read is poisoned, it is
+                * ok if we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, with weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_mb.spin.input.trail
new file mode 100644 (file)
index 0000000..915575e
--- /dev/null
@@ -0,0 +1,1485 @@
+-2:3:-2
+-4:-4:-4
+1:0:4654
+2:2:2900
+3:2:2905
+4:2:2909
+5:2:2917
+6:2:2921
+7:2:2925
+8:0:4654
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:4654
+16:3:4624
+17:3:4627
+18:3:4634
+19:3:4641
+20:3:4644
+21:3:4648
+22:3:4649
+23:0:4654
+24:3:4651
+25:0:4654
+26:2:2929
+27:0:4654
+28:2:2935
+29:0:4654
+30:2:2936
+31:0:4654
+32:2:2938
+33:0:4654
+34:2:2939
+35:0:4654
+36:2:2940
+37:0:4654
+38:2:2941
+39:0:4654
+40:2:2942
+41:2:2943
+42:2:2947
+43:2:2948
+44:2:2956
+45:2:2957
+46:2:2961
+47:2:2962
+48:2:2970
+49:2:2975
+50:2:2979
+51:2:2980
+52:2:2988
+53:2:2989
+54:2:2993
+55:2:2994
+56:2:2988
+57:2:2989
+58:2:2993
+59:2:2994
+60:2:3002
+61:2:3007
+62:2:3008
+63:2:3019
+64:2:3020
+65:2:3021
+66:2:3032
+67:2:3037
+68:2:3038
+69:2:3049
+70:2:3050
+71:2:3051
+72:2:3049
+73:2:3050
+74:2:3051
+75:2:3062
+76:2:3070
+77:0:4654
+78:2:2941
+79:0:4654
+80:2:3074
+81:2:3078
+82:2:3079
+83:2:3083
+84:2:3087
+85:2:3088
+86:2:3092
+87:2:3100
+88:2:3101
+89:2:3105
+90:2:3109
+91:2:3110
+92:2:3105
+93:2:3106
+94:2:3114
+95:0:4654
+96:2:2941
+97:0:4654
+98:2:3122
+99:2:3123
+100:2:3124
+101:0:4654
+102:2:2941
+103:0:4654
+104:2:3132
+105:0:4654
+106:2:2941
+107:0:4654
+108:2:3135
+109:2:3136
+110:2:3140
+111:2:3141
+112:2:3149
+113:2:3150
+114:2:3154
+115:2:3155
+116:2:3163
+117:2:3168
+118:2:3169
+119:2:3181
+120:2:3182
+121:2:3186
+122:2:3187
+123:2:3181
+124:2:3182
+125:2:3186
+126:2:3187
+127:2:3195
+128:2:3200
+129:2:3201
+130:2:3212
+131:2:3213
+132:2:3214
+133:2:3225
+134:2:3230
+135:2:3231
+136:2:3242
+137:2:3243
+138:2:3244
+139:2:3242
+140:2:3243
+141:2:3244
+142:2:3255
+143:2:3262
+144:0:4654
+145:2:2941
+146:0:4654
+147:2:3266
+148:2:3267
+149:2:3268
+150:2:3280
+151:2:3281
+152:2:3285
+153:2:3286
+154:2:3294
+155:2:3299
+156:2:3303
+157:2:3304
+158:2:3312
+159:2:3313
+160:2:3317
+161:2:3318
+162:2:3312
+163:2:3313
+164:2:3317
+165:2:3318
+166:2:3326
+167:2:3331
+168:2:3332
+169:2:3343
+170:2:3344
+171:2:3345
+172:2:3356
+173:2:3361
+174:2:3362
+175:2:3373
+176:2:3374
+177:2:3375
+178:2:3373
+179:2:3374
+180:2:3375
+181:2:3386
+182:2:3397
+183:2:3398
+184:0:4654
+185:2:2941
+186:0:4654
+187:2:3405
+188:2:3406
+189:2:3410
+190:2:3411
+191:2:3419
+192:2:3420
+193:2:3424
+194:2:3425
+195:2:3433
+196:2:3438
+197:2:3442
+198:2:3443
+199:2:3451
+200:2:3452
+201:2:3456
+202:2:3457
+203:2:3451
+204:2:3452
+205:2:3456
+206:2:3457
+207:2:3465
+208:2:3470
+209:2:3471
+210:2:3482
+211:2:3483
+212:2:3484
+213:2:3495
+214:2:3500
+215:2:3501
+216:2:3512
+217:2:3513
+218:2:3514
+219:2:3512
+220:2:3513
+221:2:3514
+222:2:3525
+223:0:4654
+224:2:2941
+225:0:4654
+226:2:3534
+227:2:3535
+228:2:3539
+229:2:3540
+230:2:3548
+231:2:3549
+232:2:3553
+233:2:3554
+234:2:3562
+235:2:3567
+236:2:3571
+237:2:3572
+238:2:3580
+239:2:3581
+240:2:3585
+241:2:3586
+242:2:3580
+243:2:3581
+244:2:3585
+245:2:3586
+246:2:3594
+247:2:3599
+248:2:3600
+249:2:3611
+250:2:3612
+251:2:3613
+252:2:3624
+253:2:3629
+254:2:3630
+255:2:3641
+256:2:3642
+257:2:3643
+258:2:3641
+259:2:3642
+260:2:3643
+261:2:3654
+262:2:3661
+263:0:4654
+264:2:2941
+265:0:4654
+266:2:3665
+267:2:3666
+268:2:3667
+269:2:3679
+270:2:3680
+271:2:3684
+272:2:3685
+273:2:3693
+274:2:3698
+275:2:3702
+276:2:3703
+277:2:3711
+278:2:3712
+279:2:3716
+280:2:3717
+281:2:3711
+282:2:3712
+283:2:3716
+284:2:3717
+285:2:3725
+286:2:3730
+287:2:3731
+288:2:3742
+289:2:3743
+290:2:3744
+291:2:3755
+292:2:3760
+293:2:3761
+294:2:3772
+295:2:3773
+296:2:3774
+297:2:3772
+298:2:3773
+299:2:3774
+300:2:3785
+301:2:3795
+302:2:3796
+303:0:4654
+304:2:2941
+305:0:4654
+306:2:3805
+307:2:3806
+308:0:4654
+309:2:2941
+310:0:4654
+311:2:3810
+312:0:4654
+313:2:3818
+314:0:4654
+315:2:2936
+316:0:4654
+317:2:2938
+318:0:4654
+319:2:2939
+320:0:4654
+321:2:2940
+322:0:4654
+323:2:2941
+324:0:4654
+325:2:2942
+326:2:2943
+327:2:2947
+328:2:2948
+329:2:2956
+330:2:2957
+331:2:2961
+332:2:2962
+333:2:2970
+334:2:2975
+335:2:2979
+336:2:2980
+337:2:2988
+338:2:2989
+339:2:2990
+340:2:2988
+341:2:2989
+342:2:2993
+343:2:2994
+344:2:3002
+345:2:3007
+346:2:3008
+347:2:3019
+348:2:3020
+349:2:3021
+350:2:3032
+351:2:3037
+352:2:3038
+353:2:3049
+354:2:3050
+355:2:3051
+356:2:3049
+357:2:3050
+358:2:3051
+359:2:3062
+360:2:3070
+361:0:4654
+362:2:2941
+363:0:4654
+364:2:3074
+365:2:3078
+366:2:3079
+367:2:3083
+368:2:3087
+369:2:3088
+370:2:3092
+371:2:3100
+372:2:3101
+373:2:3105
+374:2:3106
+375:2:3105
+376:2:3109
+377:2:3110
+378:2:3114
+379:0:4654
+380:2:2941
+381:0:4654
+382:2:3122
+383:2:3123
+384:2:3124
+385:0:4654
+386:2:2941
+387:0:4654
+388:2:3132
+389:0:4654
+390:2:2941
+391:0:4654
+392:2:3135
+393:2:3136
+394:2:3140
+395:2:3141
+396:2:3149
+397:2:3150
+398:2:3154
+399:2:3155
+400:2:3163
+401:2:3168
+402:2:3169
+403:2:3181
+404:2:3182
+405:2:3186
+406:2:3187
+407:2:3181
+408:2:3182
+409:2:3186
+410:2:3187
+411:2:3195
+412:2:3200
+413:2:3201
+414:2:3212
+415:2:3213
+416:2:3214
+417:2:3225
+418:2:3230
+419:2:3231
+420:2:3242
+421:2:3243
+422:2:3244
+423:2:3242
+424:2:3243
+425:2:3244
+426:2:3255
+427:2:3262
+428:0:4654
+429:2:2941
+430:0:4654
+431:2:3266
+432:2:3267
+433:2:3268
+434:2:3280
+435:2:3281
+436:2:3285
+437:2:3286
+438:2:3294
+439:2:3299
+440:2:3303
+441:2:3304
+442:2:3312
+443:2:3313
+444:2:3317
+445:2:3318
+446:2:3312
+447:2:3313
+448:2:3317
+449:2:3318
+450:2:3326
+451:2:3331
+452:2:3332
+453:2:3343
+454:2:3344
+455:2:3345
+456:2:3356
+457:2:3361
+458:2:3362
+459:2:3373
+460:2:3374
+461:2:3375
+462:2:3373
+463:2:3374
+464:2:3375
+465:2:3386
+466:2:3397
+467:2:3398
+468:0:4654
+469:2:2941
+470:0:4654
+471:2:3405
+472:2:3406
+473:2:3410
+474:2:3411
+475:2:3419
+476:2:3420
+477:2:3424
+478:2:3425
+479:2:3433
+480:2:3438
+481:2:3442
+482:2:3443
+483:2:3451
+484:2:3452
+485:2:3456
+486:2:3457
+487:2:3451
+488:2:3452
+489:2:3456
+490:2:3457
+491:2:3465
+492:2:3470
+493:2:3471
+494:2:3482
+495:2:3483
+496:2:3484
+497:2:3495
+498:2:3500
+499:2:3501
+500:2:3512
+501:2:3513
+502:2:3514
+503:2:3512
+504:2:3513
+505:2:3514
+506:2:3525
+507:0:4654
+508:2:2941
+509:0:4654
+510:2:3534
+511:2:3535
+512:2:3539
+513:2:3540
+514:2:3548
+515:2:3549
+516:2:3553
+517:2:3554
+518:2:3562
+519:2:3567
+520:2:3571
+521:2:3572
+522:2:3580
+523:2:3581
+524:2:3585
+525:2:3586
+526:2:3580
+527:2:3581
+528:2:3585
+529:2:3586
+530:2:3594
+531:2:3599
+532:2:3600
+533:2:3611
+534:2:3612
+535:2:3613
+536:2:3624
+537:2:3629
+538:2:3630
+539:2:3641
+540:2:3642
+541:2:3643
+542:2:3641
+543:2:3642
+544:2:3643
+545:2:3654
+546:2:3661
+547:0:4654
+548:2:2941
+549:0:4654
+550:2:3665
+551:2:3666
+552:2:3667
+553:2:3679
+554:2:3680
+555:2:3684
+556:2:3685
+557:2:3693
+558:2:3698
+559:2:3702
+560:2:3703
+561:2:3711
+562:2:3712
+563:2:3716
+564:2:3717
+565:2:3711
+566:2:3712
+567:2:3716
+568:2:3717
+569:2:3725
+570:2:3730
+571:2:3731
+572:2:3742
+573:2:3743
+574:2:3744
+575:2:3755
+576:2:3760
+577:2:3761
+578:2:3772
+579:2:3773
+580:2:3774
+581:2:3772
+582:2:3773
+583:2:3774
+584:2:3785
+585:2:3795
+586:2:3796
+587:0:4654
+588:2:2941
+589:0:4654
+590:2:3805
+591:2:3806
+592:0:4654
+593:2:2941
+594:0:4654
+595:2:3810
+596:0:4654
+597:2:3818
+598:0:4654
+599:2:2936
+600:0:4654
+601:2:2938
+602:0:4654
+603:2:2939
+604:0:4654
+605:2:2940
+606:0:4654
+607:2:2941
+608:0:4654
+609:2:2942
+610:2:2943
+611:2:2947
+612:2:2948
+613:2:2956
+614:2:2957
+615:2:2961
+616:2:2962
+617:2:2970
+618:2:2975
+619:2:2979
+620:2:2980
+621:2:2988
+622:2:2989
+623:2:2993
+624:2:2994
+625:2:2988
+626:2:2989
+627:2:2990
+628:2:3002
+629:2:3007
+630:2:3008
+631:2:3019
+632:2:3020
+633:2:3021
+634:2:3032
+635:2:3037
+636:2:3038
+637:2:3049
+638:2:3050
+639:2:3051
+640:2:3049
+641:2:3050
+642:2:3051
+643:2:3062
+644:2:3070
+645:0:4654
+646:2:2941
+647:0:4654
+648:2:3074
+649:2:3078
+650:2:3079
+651:2:3083
+652:2:3087
+653:2:3088
+654:2:3092
+655:2:3100
+656:2:3101
+657:2:3105
+658:2:3109
+659:2:3110
+660:2:3105
+661:2:3106
+662:2:3114
+663:0:4654
+664:2:2941
+665:0:4654
+666:2:3122
+667:2:3123
+668:2:3124
+669:0:4654
+670:2:2941
+671:0:4654
+672:2:3132
+673:0:4654
+674:2:2941
+675:0:4654
+676:2:3135
+677:2:3136
+678:2:3140
+679:2:3141
+680:2:3149
+681:2:3150
+682:2:3154
+683:2:3155
+684:2:3163
+685:2:3168
+686:2:3169
+687:2:3181
+688:2:3182
+689:2:3186
+690:2:3187
+691:2:3181
+692:2:3182
+693:2:3186
+694:2:3187
+695:2:3195
+696:2:3200
+697:2:3201
+698:2:3212
+699:2:3213
+700:2:3214
+701:2:3225
+702:2:3230
+703:2:3231
+704:2:3242
+705:2:3243
+706:2:3244
+707:2:3242
+708:2:3243
+709:2:3244
+710:2:3255
+711:2:3262
+712:0:4654
+713:2:2941
+714:0:4654
+715:2:3266
+716:2:3267
+717:2:3268
+718:2:3280
+719:2:3281
+720:2:3285
+721:2:3286
+722:2:3294
+723:2:3299
+724:2:3303
+725:2:3304
+726:2:3312
+727:2:3313
+728:2:3317
+729:2:3318
+730:2:3312
+731:2:3313
+732:2:3317
+733:2:3318
+734:2:3326
+735:2:3331
+736:2:3332
+737:2:3343
+738:2:3344
+739:2:3345
+740:2:3356
+741:2:3361
+742:2:3362
+743:2:3373
+744:2:3374
+745:2:3375
+746:2:3373
+747:2:3374
+748:2:3375
+749:2:3386
+750:2:3397
+751:2:3398
+752:0:4654
+753:2:2941
+754:0:4654
+755:2:3405
+756:2:3406
+757:2:3410
+758:2:3411
+759:2:3419
+760:2:3420
+761:2:3424
+762:2:3425
+763:2:3433
+764:2:3438
+765:2:3442
+766:2:3443
+767:2:3451
+768:2:3452
+769:2:3456
+770:2:3457
+771:2:3451
+772:2:3452
+773:2:3456
+774:2:3457
+775:2:3465
+776:2:3470
+777:2:3471
+778:2:3482
+779:2:3483
+780:2:3484
+781:2:3495
+782:2:3500
+783:2:3501
+784:2:3512
+785:2:3513
+786:2:3514
+787:2:3512
+788:2:3513
+789:2:3514
+790:2:3525
+791:0:4654
+792:2:2941
+793:0:4654
+794:2:3665
+795:2:3666
+796:2:3670
+797:2:3671
+798:2:3679
+799:2:3680
+800:2:3684
+801:2:3685
+802:2:3693
+803:2:3698
+804:2:3702
+805:2:3703
+806:2:3711
+807:2:3712
+808:2:3716
+809:2:3717
+810:2:3711
+811:2:3712
+812:2:3716
+813:2:3717
+814:2:3725
+815:2:3730
+816:2:3731
+817:2:3742
+818:2:3743
+819:2:3744
+820:2:3755
+821:2:3760
+822:2:3761
+823:2:3772
+824:2:3773
+825:2:3774
+826:2:3772
+827:2:3773
+828:2:3774
+829:2:3785
+830:2:3795
+831:2:3796
+832:0:4654
+833:2:2941
+834:0:4654
+835:2:3805
+836:2:3806
+837:0:4654
+838:2:2941
+839:0:4654
+840:2:3534
+841:2:3535
+842:2:3539
+843:2:3540
+844:2:3548
+845:2:3549
+846:2:3553
+847:2:3554
+848:2:3562
+849:2:3567
+850:2:3571
+851:2:3572
+852:2:3580
+853:2:3581
+854:2:3582
+855:2:3580
+856:2:3581
+857:2:3585
+858:2:3586
+859:2:3594
+860:2:3599
+861:2:3600
+862:2:3611
+863:2:3612
+864:2:3613
+865:2:3624
+866:2:3629
+867:2:3630
+868:2:3641
+869:2:3642
+870:2:3643
+871:2:3641
+872:2:3642
+873:2:3643
+874:2:3654
+875:2:3661
+876:0:4654
+877:2:2941
+878:0:4654
+879:2:3810
+880:0:4654
+881:2:3818
+882:0:4654
+883:2:3819
+884:0:4654
+885:2:3824
+886:0:4654
+887:1:29
+888:0:4654
+889:2:3825
+890:0:4654
+891:1:35
+892:0:4654
+893:2:3824
+894:0:4654
+895:1:36
+896:0:4654
+897:2:3825
+898:0:4654
+899:1:37
+900:0:4654
+901:2:3824
+902:0:4654
+903:1:38
+904:0:4654
+905:2:3825
+906:0:4654
+907:1:39
+908:0:4654
+909:2:3824
+910:0:4654
+911:1:40
+912:0:4654
+913:2:3825
+914:0:4654
+915:1:41
+916:0:4654
+917:2:3824
+918:0:4654
+919:1:42
+920:0:4654
+921:2:3825
+922:0:4654
+923:1:43
+924:0:4654
+925:2:3824
+926:0:4654
+927:1:44
+928:0:4654
+929:2:3825
+930:0:4654
+931:1:45
+932:0:4654
+933:2:3824
+934:0:4654
+935:1:46
+936:0:4654
+937:2:3825
+938:0:4654
+939:1:47
+940:0:4654
+941:2:3824
+942:0:4654
+943:1:48
+944:0:4654
+945:2:3825
+946:0:4654
+947:1:149
+948:0:4654
+949:2:3824
+950:0:4654
+951:1:151
+952:0:4654
+953:2:3825
+954:0:4654
+955:1:50
+956:0:4654
+957:2:3824
+958:0:4654
+959:1:157
+960:1:158
+961:1:162
+962:1:163
+963:1:171
+964:1:172
+965:1:176
+966:1:177
+967:1:185
+968:1:190
+969:1:194
+970:1:195
+971:1:203
+972:1:204
+973:1:208
+974:1:209
+975:1:203
+976:1:204
+977:1:208
+978:1:209
+979:1:217
+980:1:222
+981:1:223
+982:1:234
+983:1:235
+984:1:236
+985:1:247
+986:1:259
+987:1:260
+988:1:264
+989:1:265
+990:1:266
+991:1:264
+992:1:265
+993:1:266
+994:1:277
+995:0:4654
+996:2:3825
+997:0:4654
+998:1:46
+999:0:4654
+1000:2:3824
+1001:0:4654
+1002:1:47
+1003:0:4654
+1004:2:3825
+1005:0:4654
+1006:1:48
+1007:0:4654
+1008:2:3824
+1009:0:4654
+1010:1:149
+1011:0:4654
+1012:2:3825
+1013:0:4654
+1014:1:151
+1015:0:4654
+1016:2:3824
+1017:0:4654
+1018:1:50
+1019:0:4654
+1020:2:3825
+1021:0:4654
+1022:1:286
+1023:1:287
+1024:0:4654
+1025:2:3824
+1026:0:4654
+1027:1:46
+1028:0:4654
+1029:2:3825
+1030:0:4654
+1031:1:47
+1032:0:4654
+1033:2:3824
+1034:0:4654
+1035:1:48
+1036:0:4654
+1037:2:3825
+1038:0:4654
+1039:1:149
+1040:0:4654
+1041:2:3824
+1042:0:4654
+1043:1:151
+1044:0:4654
+1045:2:3825
+1046:0:4654
+1047:1:50
+1048:0:4654
+1049:2:3824
+1050:0:4654
+1051:1:293
+1052:1:294
+1053:1:298
+1054:1:299
+1055:1:307
+1056:1:308
+1057:1:312
+1058:1:313
+1059:1:321
+1060:1:326
+1061:1:330
+1062:1:331
+1063:1:339
+1064:1:340
+1065:1:344
+1066:1:345
+1067:1:339
+1068:1:340
+1069:1:344
+1070:1:345
+1071:1:353
+1072:1:358
+1073:1:359
+1074:1:370
+1075:1:371
+1076:1:372
+1077:1:383
+1078:1:395
+1079:1:396
+1080:1:400
+1081:1:401
+1082:1:402
+1083:1:400
+1084:1:401
+1085:1:402
+1086:1:413
+1087:0:4654
+1088:2:3825
+1089:0:4654
+1090:1:46
+1091:0:4654
+1092:2:3824
+1093:0:4654
+1094:1:47
+1095:0:4654
+1096:2:3825
+1097:0:4654
+1098:1:48
+1099:0:4654
+1100:2:3824
+1101:0:4654
+1102:1:149
+1103:0:4654
+1104:2:3825
+1105:0:4654
+1106:1:151
+1107:0:4654
+1108:2:3824
+1109:0:4654
+1110:1:50
+1111:0:4654
+1112:2:3825
+1113:0:4654
+1114:1:422
+1115:1:423
+1116:1:427
+1117:1:428
+1118:1:436
+1119:1:437
+1120:1:441
+1121:1:442
+1122:1:450
+1123:1:455
+1124:1:459
+1125:1:460
+1126:1:468
+1127:1:469
+1128:1:473
+1129:1:474
+1130:1:468
+1131:1:469
+1132:1:473
+1133:1:474
+1134:1:482
+1135:1:487
+1136:1:488
+1137:1:499
+1138:1:500
+1139:1:501
+1140:1:512
+1141:1:524
+1142:1:525
+1143:1:529
+1144:1:530
+1145:1:531
+1146:1:529
+1147:1:530
+1148:1:531
+1149:1:542
+1150:1:549
+1151:0:4654
+1152:2:3824
+1153:0:4654
+1154:1:46
+1155:0:4654
+1156:2:3825
+1157:0:4654
+1158:1:47
+1159:0:4654
+1160:2:3824
+1161:0:4654
+1162:1:48
+1163:0:4654
+1164:2:3825
+1165:0:4654
+1166:1:149
+1167:0:4654
+1168:2:3824
+1169:0:4654
+1170:1:151
+1171:0:4654
+1172:2:3825
+1173:0:4654
+1174:1:50
+1175:0:4654
+1176:2:3824
+1177:0:4654
+1178:1:687
+1179:1:688
+1180:1:692
+1181:1:693
+1182:1:701
+1183:1:702
+1184:1:703
+1185:1:715
+1186:1:720
+1187:1:724
+1188:1:725
+1189:1:733
+1190:1:734
+1191:1:738
+1192:1:739
+1193:1:733
+1194:1:734
+1195:1:738
+1196:1:739
+1197:1:747
+1198:1:752
+1199:1:753
+1200:1:764
+1201:1:765
+1202:1:766
+1203:1:777
+1204:1:789
+1205:1:790
+1206:1:794
+1207:1:795
+1208:1:796
+1209:1:794
+1210:1:795
+1211:1:796
+1212:1:807
+1213:0:4654
+1214:2:3825
+1215:0:4654
+1216:1:46
+1217:0:4654
+1218:2:3824
+1219:0:4654
+1220:1:47
+1221:0:4654
+1222:2:3825
+1223:0:4654
+1224:1:48
+1225:0:4654
+1226:2:3824
+1227:0:4654
+1228:1:149
+1229:0:4654
+1230:2:3825
+1231:0:4654
+1232:1:151
+1233:0:4654
+1234:2:3824
+1235:0:4654
+1236:1:50
+1237:0:4654
+1238:2:3825
+1239:0:4654
+1240:1:816
+1241:1:819
+1242:1:820
+1243:0:4654
+1244:2:3824
+1245:0:4654
+1246:1:46
+1247:0:4654
+1248:2:3825
+1249:0:4654
+1250:1:47
+1251:0:4654
+1252:2:3824
+1253:0:4654
+1254:1:48
+1255:0:4654
+1256:2:3825
+1257:0:4654
+1258:1:149
+1259:0:4654
+1260:2:3824
+1261:0:4654
+1262:1:151
+1263:0:4654
+1264:2:3825
+1265:0:4654
+1266:1:50
+1267:0:4654
+1268:2:3824
+1269:0:4654
+1270:1:823
+1271:1:824
+1272:1:828
+1273:1:829
+1274:1:837
+1275:1:838
+1276:1:842
+1277:1:843
+1278:1:851
+1279:1:856
+1280:1:860
+1281:1:861
+1282:1:869
+1283:1:870
+1284:1:874
+1285:1:875
+1286:1:869
+1287:1:870
+1288:1:874
+1289:1:875
+1290:1:883
+1291:1:888
+1292:1:889
+1293:1:900
+1294:1:901
+1295:1:902
+1296:1:913
+1297:1:925
+1298:1:926
+1299:1:930
+1300:1:931
+1301:1:932
+1302:1:930
+1303:1:931
+1304:1:932
+1305:1:943
+1306:0:4654
+1307:2:3825
+1308:0:4654
+1309:1:46
+1310:0:4654
+1311:2:3824
+1312:0:4654
+1313:1:47
+1314:0:4654
+1315:2:3825
+1316:0:4654
+1317:1:48
+1318:0:4654
+1319:2:3824
+1320:0:4654
+1321:1:149
+1322:0:4654
+1323:2:3825
+1324:0:4654
+1325:1:151
+1326:0:4654
+1327:2:3824
+1328:0:4654
+1329:1:50
+1330:0:4654
+1331:2:3825
+1332:0:4654
+1333:1:1083
+1334:1:1084
+1335:1:1088
+1336:1:1089
+1337:1:1097
+1338:1:1098
+1339:1:1102
+1340:1:1103
+1341:1:1111
+1342:1:1116
+1343:1:1120
+1344:1:1121
+1345:1:1129
+1346:1:1130
+1347:1:1134
+1348:1:1135
+1349:1:1129
+1350:1:1130
+1351:1:1134
+1352:1:1135
+1353:1:1143
+1354:1:1148
+1355:1:1149
+1356:1:1160
+1357:1:1161
+1358:1:1162
+1359:1:1173
+1360:1:1185
+1361:1:1186
+1362:1:1190
+1363:1:1191
+1364:1:1192
+1365:1:1190
+1366:1:1191
+1367:1:1192
+1368:1:1203
+1369:1:1210
+1370:1:1214
+1371:0:4654
+1372:2:3824
+1373:0:4654
+1374:1:46
+1375:0:4654
+1376:2:3825
+1377:0:4654
+1378:1:47
+1379:0:4654
+1380:2:3824
+1381:0:4654
+1382:1:48
+1383:0:4654
+1384:2:3825
+1385:0:4654
+1386:1:149
+1387:0:4654
+1388:2:3824
+1389:0:4654
+1390:1:151
+1391:0:4654
+1392:2:3825
+1393:0:4654
+1394:1:50
+1395:0:4654
+1396:2:3824
+1397:0:4654
+1398:1:1215
+1399:1:1216
+1400:1:1220
+1401:1:1221
+1402:1:1229
+1403:1:1230
+1404:1:1231
+1405:1:1243
+1406:1:1248
+1407:1:1252
+1408:1:1253
+1409:1:1261
+1410:1:1262
+1411:1:1266
+1412:1:1267
+1413:1:1261
+1414:1:1262
+1415:1:1266
+1416:1:1267
+1417:1:1275
+1418:1:1280
+1419:1:1281
+1420:1:1292
+1421:1:1293
+1422:1:1294
+1423:1:1305
+1424:1:1317
+1425:1:1318
+1426:1:1322
+1427:1:1323
+1428:1:1324
+1429:1:1322
+1430:1:1323
+1431:1:1324
+1432:1:1335
+1433:0:4654
+1434:2:3825
+1435:0:4654
+1436:1:46
+1437:0:4654
+1438:2:3824
+1439:0:4654
+1440:1:47
+1441:0:4654
+1442:2:3825
+1443:0:4654
+1444:1:48
+1445:0:4654
+1446:2:3824
+1447:0:4654
+1448:1:149
+1449:0:4654
+1450:2:3825
+1451:0:4654
+1452:1:151
+1453:0:4654
+1454:2:3824
+1455:0:4654
+1456:1:50
+1457:0:4654
+1458:2:3825
+1459:0:4654
+1460:1:1344
+1461:0:4654
+1462:2:3824
+1463:0:4654
+1464:1:2808
+1465:1:2815
+1466:1:2816
+1467:1:2823
+1468:1:2828
+1469:1:2835
+1470:1:2836
+1471:1:2835
+1472:1:2836
+1473:1:2843
+1474:1:2847
+1475:0:4654
+1476:2:3825
+1477:0:4654
+1478:1:1346
+1479:1:1347
+1480:0:4652
+1481:2:3824
+1482:0:4658
+1483:1:2492
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.log b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.log
new file mode 100644 (file)
index 0000000..91e8666
--- /dev/null
@@ -0,0 +1,540 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_rmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+Depth=    8300 States=    1e+06 Transitions= 1.86e+08 Memory=   513.029        t=    280 R=   4e+03
+Depth=   10044 States=    2e+06 Transitions= 3.84e+08 Memory=   559.221        t=    594 R=   3e+03
+Depth=   10044 States=    3e+06 Transitions= 6.04e+08 Memory=   605.803        t=    964 R=   3e+03
+pan: resizing hashtable to -w22..  done
+Depth=   10044 States=    4e+06 Transitions= 8.01e+08 Memory=   682.920        t= 1.27e+03 R=   3e+03
+Depth=   10044 States=    5e+06 Transitions= 9.95e+08 Memory=   728.721        t= 1.57e+03 R=   3e+03
+Depth=   10044 States=    6e+06 Transitions= 1.26e+09 Memory=   775.010        t= 1.99e+03 R=   3e+03
+Depth=   10044 States=    7e+06 Transitions= 1.79e+09 Memory=   821.592        t= 2.89e+03 R=   2e+03
+Depth=   10044 States=    8e+06 Transitions= 2.16e+09 Memory=   867.979        t= 3.5e+03 R=   2e+03
+Depth=   10044 States=    9e+06 Transitions= 2.51e+09 Memory=   914.072        t= 4.1e+03 R=   2e+03
+pan: resizing hashtable to -w24..  done
+Depth=   10044 States=    1e+07 Transitions= 2.87e+09 Memory=  1085.334        t= 4.67e+03 R=   2e+03
+Depth=   10044 States=  1.1e+07 Transitions= 3.27e+09 Memory=  1132.404        t= 5.29e+03 R=   2e+03
+Depth=   10044 States=  1.2e+07 Transitions= 3.66e+09 Memory=  1179.377        t= 5.91e+03 R=   2e+03
+pan: claim violated! (at depth 1680)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 10044, errors: 1
+ 12387044 states, stored
+3.7683983e+09 states, matched
+3.7807853e+09 transitions (= stored+matched)
+2.0718621e+10 atomic steps
+hash conflicts: 2.2091218e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1370.332      equivalent memory usage for states (stored*(State-vector + overhead))
+  612.258      actual memory usage for states (compression: 44.68%)
+               state-vector as stored = 16 byte + 36 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1197.736      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 166228 1420 961 2 2 ]
+unreached in proctype urcu_reader
+       line 894, "pan.___", state 12, "((i<1))"
+       line 894, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 407, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 223, "(1)"
+       line 434, "pan.___", state 253, "(1)"
+       line 438, "pan.___", state 266, "(1)"
+       line 687, "pan.___", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 326, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 359, "(1)"
+       line 434, "pan.___", state 389, "(1)"
+       line 438, "pan.___", state 402, "(1)"
+       line 407, "pan.___", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 455, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 488, "(1)"
+       line 434, "pan.___", state 518, "(1)"
+       line 438, "pan.___", state 531, "(1)"
+       line 407, "pan.___", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 556, "(1)"
+       line 407, "pan.___", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 557, "else"
+       line 407, "pan.___", state 560, "(1)"
+       line 411, "pan.___", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 570, "(1)"
+       line 411, "pan.___", state 571, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 571, "else"
+       line 411, "pan.___", state 574, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 409, "pan.___", state 580, "((i<1))"
+       line 409, "pan.___", state 580, "((i>=1))"
+       line 416, "pan.___", state 586, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 588, "(1)"
+       line 416, "pan.___", state 589, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 589, "else"
+       line 416, "pan.___", state 592, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 420, "pan.___", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 602, "(1)"
+       line 420, "pan.___", state 603, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 603, "else"
+       line 420, "pan.___", state 606, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 418, "pan.___", state 612, "((i<2))"
+       line 418, "pan.___", state 612, "((i>=2))"
+       line 425, "pan.___", state 619, "(1)"
+       line 425, "pan.___", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 620, "else"
+       line 425, "pan.___", state 623, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 429, "pan.___", state 632, "(1)"
+       line 429, "pan.___", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 633, "else"
+       line 429, "pan.___", state 636, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 427, "pan.___", state 642, "((i<1))"
+       line 427, "pan.___", state 642, "((i>=1))"
+       line 434, "pan.___", state 649, "(1)"
+       line 434, "pan.___", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 650, "else"
+       line 434, "pan.___", state 653, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 438, "pan.___", state 662, "(1)"
+       line 438, "pan.___", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 663, "else"
+       line 438, "pan.___", state 666, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 436, "pan.___", state 672, "((i<2))"
+       line 436, "pan.___", state 672, "((i>=2))"
+       line 446, "pan.___", state 676, "(1)"
+       line 446, "pan.___", state 676, "(1)"
+       line 687, "pan.___", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 687, "pan.___", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 687, "pan.___", state 681, "(1)"
+       line 407, "pan.___", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 720, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 753, "(1)"
+       line 434, "pan.___", state 783, "(1)"
+       line 438, "pan.___", state 796, "(1)"
+       line 407, "pan.___", state 824, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 856, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 870, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 889, "(1)"
+       line 434, "pan.___", state 919, "(1)"
+       line 438, "pan.___", state 932, "(1)"
+       line 407, "pan.___", state 953, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 955, "(1)"
+       line 407, "pan.___", state 956, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 956, "else"
+       line 407, "pan.___", state 959, "(1)"
+       line 411, "pan.___", state 967, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 969, "(1)"
+       line 411, "pan.___", state 970, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 970, "else"
+       line 411, "pan.___", state 973, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 409, "pan.___", state 979, "((i<1))"
+       line 409, "pan.___", state 979, "((i>=1))"
+       line 416, "pan.___", state 985, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 987, "(1)"
+       line 416, "pan.___", state 988, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 988, "else"
+       line 416, "pan.___", state 991, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 420, "pan.___", state 999, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 1001, "(1)"
+       line 420, "pan.___", state 1002, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 1002, "else"
+       line 420, "pan.___", state 1005, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 418, "pan.___", state 1011, "((i<2))"
+       line 418, "pan.___", state 1011, "((i>=2))"
+       line 425, "pan.___", state 1018, "(1)"
+       line 425, "pan.___", state 1019, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 1019, "else"
+       line 425, "pan.___", state 1022, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 429, "pan.___", state 1031, "(1)"
+       line 429, "pan.___", state 1032, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 1032, "else"
+       line 429, "pan.___", state 1035, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 427, "pan.___", state 1041, "((i<1))"
+       line 427, "pan.___", state 1041, "((i>=1))"
+       line 434, "pan.___", state 1048, "(1)"
+       line 434, "pan.___", state 1049, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 1049, "else"
+       line 434, "pan.___", state 1052, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 438, "pan.___", state 1061, "(1)"
+       line 438, "pan.___", state 1062, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 1062, "else"
+       line 438, "pan.___", state 1065, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 436, "pan.___", state 1071, "((i<2))"
+       line 436, "pan.___", state 1071, "((i>=2))"
+       line 446, "pan.___", state 1075, "(1)"
+       line 446, "pan.___", state 1075, "(1)"
+       line 695, "pan.___", state 1079, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 1084, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1116, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1130, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1149, "(1)"
+       line 434, "pan.___", state 1179, "(1)"
+       line 438, "pan.___", state 1192, "(1)"
+       line 407, "pan.___", state 1216, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1248, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1262, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1281, "(1)"
+       line 434, "pan.___", state 1311, "(1)"
+       line 438, "pan.___", state 1324, "(1)"
+       line 407, "pan.___", state 1349, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1381, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1395, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1414, "(1)"
+       line 434, "pan.___", state 1444, "(1)"
+       line 438, "pan.___", state 1457, "(1)"
+       line 407, "pan.___", state 1478, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1510, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1524, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1543, "(1)"
+       line 434, "pan.___", state 1573, "(1)"
+       line 438, "pan.___", state 1586, "(1)"
+       line 407, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1644, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1658, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1677, "(1)"
+       line 434, "pan.___", state 1707, "(1)"
+       line 438, "pan.___", state 1720, "(1)"
+       line 407, "pan.___", state 1741, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1773, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1787, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1806, "(1)"
+       line 434, "pan.___", state 1836, "(1)"
+       line 438, "pan.___", state 1849, "(1)"
+       line 407, "pan.___", state 1873, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1905, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1919, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1938, "(1)"
+       line 434, "pan.___", state 1968, "(1)"
+       line 438, "pan.___", state 1981, "(1)"
+       line 734, "pan.___", state 2002, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 2009, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2041, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2055, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2074, "(1)"
+       line 434, "pan.___", state 2104, "(1)"
+       line 438, "pan.___", state 2117, "(1)"
+       line 407, "pan.___", state 2138, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2170, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2184, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2203, "(1)"
+       line 434, "pan.___", state 2233, "(1)"
+       line 438, "pan.___", state 2246, "(1)"
+       line 407, "pan.___", state 2269, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 2271, "(1)"
+       line 407, "pan.___", state 2272, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 2272, "else"
+       line 407, "pan.___", state 2275, "(1)"
+       line 411, "pan.___", state 2283, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2286, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 2286, "else"
+       line 411, "pan.___", state 2289, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 409, "pan.___", state 2295, "((i<1))"
+       line 409, "pan.___", state 2295, "((i>=1))"
+       line 416, "pan.___", state 2301, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2303, "(1)"
+       line 416, "pan.___", state 2304, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2304, "else"
+       line 416, "pan.___", state 2307, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 420, "pan.___", state 2315, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2317, "(1)"
+       line 420, "pan.___", state 2318, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2318, "else"
+       line 420, "pan.___", state 2321, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 418, "pan.___", state 2327, "((i<2))"
+       line 418, "pan.___", state 2327, "((i>=2))"
+       line 425, "pan.___", state 2334, "(1)"
+       line 425, "pan.___", state 2335, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 2335, "else"
+       line 425, "pan.___", state 2338, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 429, "pan.___", state 2347, "(1)"
+       line 429, "pan.___", state 2348, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 2348, "else"
+       line 429, "pan.___", state 2351, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 427, "pan.___", state 2357, "((i<1))"
+       line 427, "pan.___", state 2357, "((i>=1))"
+       line 434, "pan.___", state 2364, "(1)"
+       line 434, "pan.___", state 2365, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 2365, "else"
+       line 434, "pan.___", state 2368, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 438, "pan.___", state 2377, "(1)"
+       line 438, "pan.___", state 2378, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 2378, "else"
+       line 438, "pan.___", state 2381, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 436, "pan.___", state 2387, "((i<2))"
+       line 436, "pan.___", state 2387, "((i>=2))"
+       line 446, "pan.___", state 2391, "(1)"
+       line 446, "pan.___", state 2391, "(1)"
+       line 734, "pan.___", state 2394, "cached_urcu_active_readers = (tmp+1)"
+       line 734, "pan.___", state 2395, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 734, "pan.___", state 2396, "(1)"
+       line 407, "pan.___", state 2403, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2435, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2449, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2468, "(1)"
+       line 434, "pan.___", state 2498, "(1)"
+       line 438, "pan.___", state 2511, "(1)"
+       line 407, "pan.___", state 2538, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2570, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2584, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2603, "(1)"
+       line 434, "pan.___", state 2633, "(1)"
+       line 438, "pan.___", state 2646, "(1)"
+       line 407, "pan.___", state 2667, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2699, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2713, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2732, "(1)"
+       line 434, "pan.___", state 2762, "(1)"
+       line 438, "pan.___", state 2775, "(1)"
+       line 407, "pan.___", state 2808, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2840, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2854, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2873, "(1)"
+       line 434, "pan.___", state 2903, "(1)"
+       line 438, "pan.___", state 2916, "(1)"
+       line 407, "pan.___", state 2935, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2967, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2981, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 3000, "(1)"
+       line 434, "pan.___", state 3030, "(1)"
+       line 438, "pan.___", state 3043, "(1)"
+       line 929, "pan.___", state 3064, "-end-"
+       (252 of 3064 states)
+unreached in proctype urcu_writer
+       line 1018, "pan.___", state 12, "((i<1))"
+       line 1018, "pan.___", state 12, "((i>=1))"
+       line 407, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 51, "(1)"
+       line 411, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 65, "(1)"
+       line 411, "pan.___", state 66, "(1)"
+       line 411, "pan.___", state 66, "(1)"
+       line 409, "pan.___", state 71, "((i<1))"
+       line 409, "pan.___", state 71, "((i>=1))"
+       line 416, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 83, "(1)"
+       line 416, "pan.___", state 84, "(1)"
+       line 416, "pan.___", state 84, "(1)"
+       line 420, "pan.___", state 97, "(1)"
+       line 420, "pan.___", state 98, "(1)"
+       line 420, "pan.___", state 98, "(1)"
+       line 418, "pan.___", state 103, "((i<2))"
+       line 418, "pan.___", state 103, "((i>=2))"
+       line 425, "pan.___", state 110, "(1)"
+       line 425, "pan.___", state 111, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 111, "else"
+       line 425, "pan.___", state 114, "(1)"
+       line 425, "pan.___", state 115, "(1)"
+       line 425, "pan.___", state 115, "(1)"
+       line 429, "pan.___", state 123, "(1)"
+       line 429, "pan.___", state 124, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 124, "else"
+       line 429, "pan.___", state 127, "(1)"
+       line 429, "pan.___", state 128, "(1)"
+       line 429, "pan.___", state 128, "(1)"
+       line 427, "pan.___", state 133, "((i<1))"
+       line 427, "pan.___", state 133, "((i>=1))"
+       line 434, "pan.___", state 140, "(1)"
+       line 434, "pan.___", state 141, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 141, "else"
+       line 434, "pan.___", state 144, "(1)"
+       line 434, "pan.___", state 145, "(1)"
+       line 434, "pan.___", state 145, "(1)"
+       line 438, "pan.___", state 153, "(1)"
+       line 438, "pan.___", state 154, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 154, "else"
+       line 438, "pan.___", state 157, "(1)"
+       line 438, "pan.___", state 158, "(1)"
+       line 438, "pan.___", state 158, "(1)"
+       line 436, "pan.___", state 163, "((i<2))"
+       line 436, "pan.___", state 163, "((i>=2))"
+       line 446, "pan.___", state 167, "(1)"
+       line 446, "pan.___", state 167, "(1)"
+       line 268, "pan.___", state 176, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 185, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 198, "cache_dirty_rcu_ptr = 0"
+       line 407, "pan.___", state 238, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 252, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 270, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 284, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 303, "(1)"
+       line 429, "pan.___", state 316, "(1)"
+       line 434, "pan.___", state 333, "(1)"
+       line 438, "pan.___", state 346, "(1)"
+       line 411, "pan.___", state 383, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 401, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 415, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 447, "(1)"
+       line 434, "pan.___", state 464, "(1)"
+       line 438, "pan.___", state 477, "(1)"
+       line 411, "pan.___", state 522, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 540, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 554, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 586, "(1)"
+       line 434, "pan.___", state 603, "(1)"
+       line 438, "pan.___", state 616, "(1)"
+       line 411, "pan.___", state 651, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 669, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 683, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 715, "(1)"
+       line 434, "pan.___", state 732, "(1)"
+       line 438, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 782, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 800, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 814, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 846, "(1)"
+       line 434, "pan.___", state 863, "(1)"
+       line 438, "pan.___", state 876, "(1)"
+       line 268, "pan.___", state 931, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 940, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 955, "(1)"
+       line 280, "pan.___", state 962, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 978, "(1)"
+       line 249, "pan.___", state 986, "(1)"
+       line 253, "pan.___", state 998, "(1)"
+       line 257, "pan.___", state 1006, "(1)"
+       line 268, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1046, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1059, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1068, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1084, "(1)"
+       line 249, "pan.___", state 1092, "(1)"
+       line 253, "pan.___", state 1104, "(1)"
+       line 257, "pan.___", state 1112, "(1)"
+       line 272, "pan.___", state 1138, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1151, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1160, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1176, "(1)"
+       line 249, "pan.___", state 1184, "(1)"
+       line 253, "pan.___", state 1196, "(1)"
+       line 257, "pan.___", state 1204, "(1)"
+       line 268, "pan.___", state 1235, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1244, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1257, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1266, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1282, "(1)"
+       line 249, "pan.___", state 1290, "(1)"
+       line 253, "pan.___", state 1302, "(1)"
+       line 257, "pan.___", state 1310, "(1)"
+       line 272, "pan.___", state 1336, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1349, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1358, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1374, "(1)"
+       line 249, "pan.___", state 1382, "(1)"
+       line 253, "pan.___", state 1394, "(1)"
+       line 257, "pan.___", state 1402, "(1)"
+       line 268, "pan.___", state 1433, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1442, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1455, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1464, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1480, "(1)"
+       line 249, "pan.___", state 1488, "(1)"
+       line 253, "pan.___", state 1500, "(1)"
+       line 257, "pan.___", state 1508, "(1)"
+       line 272, "pan.___", state 1534, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1547, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1556, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1572, "(1)"
+       line 249, "pan.___", state 1580, "(1)"
+       line 253, "pan.___", state 1592, "(1)"
+       line 257, "pan.___", state 1600, "(1)"
+       line 268, "pan.___", state 1631, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1640, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1653, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1662, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1678, "(1)"
+       line 249, "pan.___", state 1686, "(1)"
+       line 253, "pan.___", state 1698, "(1)"
+       line 257, "pan.___", state 1706, "(1)"
+       line 1304, "pan.___", state 1722, "-end-"
+       (129 of 1722 states)
+unreached in proctype :init:
+       line 1319, "pan.___", state 13, "((i<1))"
+       line 1319, "pan.___", state 13, "((i>=1))"
+       (1 of 28 states)
+unreached in proctype :never:
+       line 1367, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 6.09e+03 seconds
+pan: rate 2033.9606 states/second
+pan: avg transition delay 1.6108e-06 usec
+cp .input.spin urcu_free_no_rmb.spin.input
+cp .input.spin.trail urcu_free_no_rmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.spin.input b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.spin.input
new file mode 100644 (file)
index 0000000..cf50dff
--- /dev/null
@@ -0,0 +1,1340 @@
+#define NO_RMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
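+
+/*
+ * A minimal illustrative sketch (hypothetical, never invoked by the model
+ * below; the names example_token_flow, example_flow, EX_STEP_A and
+ * EX_STEP_B are illustrative only) of a two-step dataflow driven by the
+ * token macros above. CONSUME_TOKENS(state, bits, notbits) is true when
+ * all of "bits" are set and none of "notbits" are set; e.g. with
+ * state = 3 (binary 011), bits = 1 and notbits = 4, the guard is true.
+ */
+#define EX_STEP_A      (1 << 0)
+#define EX_STEP_B      (1 << 1)
+
+inline example_token_flow(example_flow)
+{
+       if
+       /* step A : needs no input token; produces its own token (runs once) */
+       :: CONSUME_TOKENS(example_flow, 0, EX_STEP_A) ->
+               PRODUCE_TOKENS(example_flow, EX_STEP_A);
+       /* step B : waits for A's token; produces its own token (runs once) */
+       :: CONSUME_TOKENS(example_flow, EX_STEP_A, EX_STEP_B) ->
+               PRODUCE_TOKENS(example_flow, EX_STEP_B);
+       fi;
+}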
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can normally make this dependency go away, but it can still be required
+ * when writing multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Another classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
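+
+/*
+ * A tiny illustrative sketch (hypothetical, never invoked by the model)
+ * of the three data dependency kinds on plain variables :
+ */
+inline example_dependencies(a, b, c)
+{
+       b = c;          /* writes b */
+       a = b;          /* RAW : must read the b written just above */
+       b = a;          /* WAR : writes b, which the previous statement read */
+       b = c;          /* WAW : overwrites the b written by the previous statement */
+}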
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not write the dirty cached copy back to memory.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
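+/*
+ * An illustrative sketch (hypothetical, never invoked by the model) of the
+ * typical access pattern with the cache macros above, using the urcu_gp_ctr
+ * variable declared further below (its per-process cached_ and cache_dirty_
+ * copies come from DECLARE_PROC_CACHED_VAR).
+ */
+inline example_cached_access(tmp)
+{
+       /* update the local cached copy and mark it dirty */
+       WRITE_CACHED_VAR(urcu_gp_ctr, 1);
+       /* nondeterministically flush the dirty copy to memory, or not */
+       RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+       /* read back the (possibly stale) local cached copy */
+       tmp = READ_CACHED_VAR(urcu_gp_ctr);
+}
+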
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
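+
+/*
+ * Each token above is one bit of _proc_urcu_reader. For instance, the guard
+ * CONSUME_TOKENS(proc_urcu_reader, READ_LOCK_OUT, READ_PROC_FIRST_MB) used
+ * below enables the first barrier only once the outermost lock has produced
+ * its output token (bit 5 set) and only while the barrier has not yet
+ * produced its own token (bit 6 clear).
+ */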
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the barrier
+                        * because the performance impact of adding a branch to skip it in the common
+                        * case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note! Currently only one reader. */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer:
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
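+/*
+ * Reminder on the token helpers used below (defined earlier in this model).
+ * As a rough sketch of the intent (an assumption for readability, not the
+ * authoritative definitions), they behave as bitmask operations on the
+ * per-process token word:
+ *
+ *   CONSUME_TOKENS(tok, deps, excl) : guard enabled when (tok & deps) == deps
+ *                                     and (tok & excl) == 0
+ *   PRODUCE_TOKENS(tok, bits)       : tok = tok | bits
+ *   CLEAR_TOKENS(tok, bits)         : tok = tok & ~(bits)
+ *
+ * i.e. a statement becomes executable once all the instructions it depends
+ * on have produced their tokens and it has not yet produced its own.
+ */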
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
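+/*
+ * WRITE_PROC_ALL_TOKENS lists the tokens a completed writer pass must have
+ * produced; it intentionally leaves out the *_WAIT_LOOP bits, which only
+ * exist transiently while busy-waiting. WRITE_PROC_ALL_TOKENS_CLEAR is
+ * (1 << 15) - 1, i.e. bits 0 to 14, so the final CLEAR_TOKENS also wipes
+ * the wait-loop bits before the next iteration.
+ */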
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we do not add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note! Currently only one reader. */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
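+               /*
+                * With SINGLE_FLIP, the tokens of the second grace-period flip
+                * are pre-produced above, so the dependency graph below treats
+                * that flip as already done and only one flip is modeled.
+                * cur_gp_val then tracks the parity locally, since the global
+                * urcu_gp_ctr is flipped only once.
+                */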
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
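+               /*
+                * First wait: the guard below produces
+                * WRITE_PROC_FIRST_WAIT_LOOP as long as reader 0 is observed
+                * inside a read-side critical section (non-zero nesting count)
+                * with a parity bit differing from cur_gp_val; otherwise it
+                * produces WRITE_PROC_FIRST_WAIT and the wait completes.
+                */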
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, with weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_rmb.spin.input.trail
new file mode 100644 (file)
index 0000000..d5ed67a
--- /dev/null
@@ -0,0 +1,1683 @@
+-2:3:-2
+-4:-4:-4
+1:0:4816
+2:2:3064
+3:2:3069
+4:2:3073
+5:2:3081
+6:2:3085
+7:2:3089
+8:0:4816
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:4816
+16:3:4786
+17:3:4789
+18:3:4796
+19:3:4803
+20:3:4806
+21:3:4810
+22:3:4811
+23:0:4816
+24:3:4813
+25:0:4816
+26:2:3093
+27:0:4816
+28:2:3099
+29:0:4816
+30:2:3100
+31:0:4816
+32:2:3102
+33:0:4816
+34:2:3103
+35:0:4816
+36:2:3104
+37:2:3105
+38:2:3109
+39:2:3110
+40:2:3118
+41:2:3119
+42:2:3123
+43:2:3124
+44:2:3132
+45:2:3137
+46:2:3141
+47:2:3142
+48:2:3150
+49:2:3151
+50:2:3155
+51:2:3156
+52:2:3150
+53:2:3151
+54:2:3155
+55:2:3156
+56:2:3164
+57:2:3169
+58:2:3170
+59:2:3181
+60:2:3182
+61:2:3183
+62:2:3194
+63:2:3199
+64:2:3200
+65:2:3211
+66:2:3212
+67:2:3213
+68:2:3211
+69:2:3212
+70:2:3213
+71:2:3224
+72:2:3232
+73:0:4816
+74:2:3103
+75:0:4816
+76:2:3236
+77:2:3240
+78:2:3241
+79:2:3245
+80:2:3249
+81:2:3250
+82:2:3254
+83:2:3262
+84:2:3263
+85:2:3267
+86:2:3271
+87:2:3272
+88:2:3267
+89:2:3268
+90:2:3276
+91:0:4816
+92:2:3103
+93:0:4816
+94:2:3284
+95:2:3285
+96:2:3286
+97:0:4816
+98:2:3103
+99:0:4816
+100:2:3291
+101:0:4816
+102:2:3995
+103:2:3996
+104:2:4000
+105:2:4004
+106:2:4005
+107:2:4009
+108:2:4014
+109:2:4022
+110:2:4026
+111:2:4027
+112:2:4022
+113:2:4026
+114:2:4027
+115:2:4031
+116:2:4038
+117:2:4045
+118:2:4046
+119:2:4053
+120:2:4058
+121:2:4065
+122:2:4066
+123:2:4065
+124:2:4066
+125:2:4073
+126:2:4077
+127:0:4816
+128:2:4082
+129:0:4816
+130:2:4083
+131:0:4816
+132:2:4084
+133:0:4816
+134:2:4085
+135:0:4816
+136:1:29
+137:0:4816
+138:2:4086
+139:0:4816
+140:1:35
+141:0:4816
+142:1:36
+143:0:4816
+144:2:4085
+145:0:4816
+146:1:37
+147:0:4816
+148:2:4086
+149:0:4816
+150:1:38
+151:0:4816
+152:2:4085
+153:0:4816
+154:1:39
+155:0:4816
+156:2:4086
+157:0:4816
+158:1:40
+159:0:4816
+160:2:4085
+161:0:4816
+162:1:41
+163:0:4816
+164:2:4086
+165:0:4816
+166:1:42
+167:0:4816
+168:1:43
+169:0:4816
+170:2:4085
+171:0:4816
+172:1:44
+173:0:4816
+174:2:4086
+175:0:4816
+176:1:53
+177:0:4816
+178:2:4085
+179:0:4816
+180:1:57
+181:1:58
+182:1:62
+183:1:66
+184:1:67
+185:1:71
+186:1:79
+187:1:80
+188:1:84
+189:1:88
+190:1:89
+191:1:84
+192:1:88
+193:1:89
+194:1:93
+195:1:100
+196:1:107
+197:1:108
+198:1:115
+199:1:120
+200:1:127
+201:1:128
+202:1:127
+203:1:128
+204:1:135
+205:1:139
+206:0:4816
+207:2:4086
+208:0:4816
+209:1:144
+210:0:4816
+211:2:4087
+212:0:4816
+213:2:4092
+214:0:4816
+215:2:4093
+216:0:4816
+217:2:4101
+218:2:4102
+219:2:4106
+220:2:4110
+221:2:4111
+222:2:4115
+223:2:4123
+224:2:4124
+225:2:4128
+226:2:4132
+227:2:4133
+228:2:4128
+229:2:4132
+230:2:4133
+231:2:4137
+232:2:4144
+233:2:4151
+234:2:4152
+235:2:4159
+236:2:4164
+237:2:4171
+238:2:4172
+239:2:4171
+240:2:4172
+241:2:4179
+242:2:4183
+243:0:4816
+244:2:3293
+245:2:3976
+246:0:4816
+247:2:3103
+248:0:4816
+249:2:3294
+250:0:4816
+251:2:3103
+252:0:4816
+253:2:3297
+254:2:3298
+255:2:3302
+256:2:3303
+257:2:3311
+258:2:3312
+259:2:3316
+260:2:3317
+261:2:3325
+262:2:3330
+263:2:3334
+264:2:3335
+265:2:3343
+266:2:3344
+267:2:3348
+268:2:3349
+269:2:3343
+270:2:3344
+271:2:3348
+272:2:3349
+273:2:3357
+274:2:3362
+275:2:3363
+276:2:3374
+277:2:3375
+278:2:3376
+279:2:3387
+280:2:3392
+281:2:3393
+282:2:3404
+283:2:3405
+284:2:3406
+285:2:3404
+286:2:3405
+287:2:3406
+288:2:3417
+289:2:3424
+290:0:4816
+291:2:3103
+292:0:4816
+293:2:3428
+294:2:3429
+295:2:3430
+296:2:3442
+297:2:3443
+298:2:3447
+299:2:3448
+300:2:3456
+301:2:3461
+302:2:3465
+303:2:3466
+304:2:3474
+305:2:3475
+306:2:3479
+307:2:3480
+308:2:3474
+309:2:3475
+310:2:3479
+311:2:3480
+312:2:3488
+313:2:3493
+314:2:3494
+315:2:3505
+316:2:3506
+317:2:3507
+318:2:3518
+319:2:3523
+320:2:3524
+321:2:3535
+322:2:3536
+323:2:3537
+324:2:3535
+325:2:3536
+326:2:3537
+327:2:3548
+328:2:3559
+329:2:3560
+330:0:4816
+331:2:3103
+332:0:4816
+333:2:3567
+334:2:3568
+335:2:3572
+336:2:3573
+337:2:3581
+338:2:3582
+339:2:3586
+340:2:3587
+341:2:3595
+342:2:3600
+343:2:3604
+344:2:3605
+345:2:3613
+346:2:3614
+347:2:3618
+348:2:3619
+349:2:3613
+350:2:3614
+351:2:3618
+352:2:3619
+353:2:3627
+354:2:3632
+355:2:3633
+356:2:3644
+357:2:3645
+358:2:3646
+359:2:3657
+360:2:3662
+361:2:3663
+362:2:3674
+363:2:3675
+364:2:3676
+365:2:3674
+366:2:3675
+367:2:3676
+368:2:3687
+369:0:4816
+370:2:3103
+371:0:4816
+372:2:3696
+373:2:3697
+374:2:3701
+375:2:3702
+376:2:3710
+377:2:3711
+378:2:3715
+379:2:3716
+380:2:3724
+381:2:3729
+382:2:3733
+383:2:3734
+384:2:3742
+385:2:3743
+386:2:3747
+387:2:3748
+388:2:3742
+389:2:3743
+390:2:3747
+391:2:3748
+392:2:3756
+393:2:3761
+394:2:3762
+395:2:3773
+396:2:3774
+397:2:3775
+398:2:3786
+399:2:3791
+400:2:3792
+401:2:3803
+402:2:3804
+403:2:3805
+404:2:3803
+405:2:3804
+406:2:3805
+407:2:3816
+408:2:3823
+409:0:4816
+410:2:3103
+411:0:4816
+412:2:3827
+413:2:3828
+414:2:3829
+415:2:3841
+416:2:3842
+417:2:3846
+418:2:3847
+419:2:3855
+420:2:3860
+421:2:3864
+422:2:3865
+423:2:3873
+424:2:3874
+425:2:3878
+426:2:3879
+427:2:3873
+428:2:3874
+429:2:3878
+430:2:3879
+431:2:3887
+432:2:3892
+433:2:3893
+434:2:3904
+435:2:3905
+436:2:3906
+437:2:3917
+438:2:3922
+439:2:3923
+440:2:3934
+441:2:3935
+442:2:3936
+443:2:3934
+444:2:3935
+445:2:3936
+446:2:3947
+447:2:3957
+448:2:3958
+449:0:4816
+450:2:3103
+451:0:4816
+452:2:3964
+453:0:4816
+454:2:4589
+455:2:4590
+456:2:4594
+457:2:4598
+458:2:4599
+459:2:4603
+460:2:4611
+461:2:4612
+462:2:4616
+463:2:4620
+464:2:4621
+465:2:4616
+466:2:4620
+467:2:4621
+468:2:4625
+469:2:4632
+470:2:4639
+471:2:4640
+472:2:4647
+473:2:4652
+474:2:4659
+475:2:4660
+476:2:4659
+477:2:4660
+478:2:4667
+479:2:4671
+480:0:4816
+481:2:4676
+482:0:4816
+483:2:4677
+484:0:4816
+485:2:4678
+486:0:4816
+487:2:4679
+488:0:4816
+489:1:53
+490:0:4816
+491:2:4680
+492:0:4816
+493:1:57
+494:1:58
+495:1:62
+496:1:66
+497:1:67
+498:1:71
+499:1:79
+500:1:80
+501:1:84
+502:1:88
+503:1:89
+504:1:84
+505:1:88
+506:1:89
+507:1:93
+508:1:100
+509:1:107
+510:1:108
+511:1:115
+512:1:120
+513:1:127
+514:1:128
+515:1:127
+516:1:128
+517:1:135
+518:1:139
+519:0:4816
+520:2:4679
+521:0:4816
+522:1:144
+523:0:4816
+524:2:4680
+525:0:4816
+526:2:4681
+527:0:4816
+528:2:4686
+529:0:4816
+530:2:4687
+531:0:4816
+532:2:4695
+533:2:4696
+534:2:4700
+535:2:4704
+536:2:4705
+537:2:4709
+538:2:4717
+539:2:4718
+540:2:4722
+541:2:4726
+542:2:4727
+543:2:4722
+544:2:4726
+545:2:4727
+546:2:4731
+547:2:4738
+548:2:4745
+549:2:4746
+550:2:4753
+551:2:4758
+552:2:4765
+553:2:4766
+554:2:4765
+555:2:4766
+556:2:4773
+557:2:4777
+558:0:4816
+559:2:3966
+560:2:3976
+561:0:4816
+562:2:3103
+563:0:4816
+564:2:3967
+565:2:3968
+566:0:4816
+567:2:3103
+568:0:4816
+569:2:3972
+570:0:4816
+571:2:3980
+572:0:4816
+573:2:3100
+574:0:4816
+575:2:3102
+576:0:4816
+577:2:3103
+578:0:4816
+579:2:3104
+580:2:3105
+581:2:3109
+582:2:3110
+583:2:3118
+584:2:3119
+585:2:3123
+586:2:3124
+587:2:3132
+588:2:3137
+589:2:3141
+590:2:3142
+591:2:3150
+592:2:3151
+593:2:3152
+594:2:3150
+595:2:3151
+596:2:3155
+597:2:3156
+598:2:3164
+599:2:3169
+600:2:3170
+601:2:3181
+602:2:3182
+603:2:3183
+604:2:3194
+605:2:3199
+606:2:3200
+607:2:3211
+608:2:3212
+609:2:3213
+610:2:3211
+611:2:3212
+612:2:3213
+613:2:3224
+614:2:3232
+615:0:4816
+616:2:3103
+617:0:4816
+618:2:3236
+619:2:3240
+620:2:3241
+621:2:3245
+622:2:3249
+623:2:3250
+624:2:3254
+625:2:3262
+626:2:3263
+627:2:3267
+628:2:3268
+629:2:3267
+630:2:3271
+631:2:3272
+632:2:3276
+633:0:4816
+634:2:3103
+635:0:4816
+636:2:3284
+637:2:3285
+638:2:3286
+639:0:4816
+640:2:3103
+641:0:4816
+642:2:3291
+643:0:4816
+644:2:3995
+645:2:3996
+646:2:4000
+647:2:4004
+648:2:4005
+649:2:4009
+650:2:4014
+651:2:4022
+652:2:4026
+653:2:4027
+654:2:4022
+655:2:4026
+656:2:4027
+657:2:4031
+658:2:4038
+659:2:4045
+660:2:4046
+661:2:4053
+662:2:4058
+663:2:4065
+664:2:4066
+665:2:4065
+666:2:4066
+667:2:4073
+668:2:4077
+669:0:4816
+670:2:4082
+671:0:4816
+672:2:4083
+673:0:4816
+674:2:4084
+675:0:4816
+676:2:4085
+677:0:4816
+678:1:53
+679:0:4816
+680:2:4086
+681:0:4816
+682:1:57
+683:1:58
+684:1:62
+685:1:66
+686:1:67
+687:1:71
+688:1:79
+689:1:80
+690:1:84
+691:1:88
+692:1:89
+693:1:84
+694:1:88
+695:1:89
+696:1:93
+697:1:100
+698:1:107
+699:1:108
+700:1:115
+701:1:120
+702:1:127
+703:1:128
+704:1:127
+705:1:128
+706:1:135
+707:1:139
+708:0:4816
+709:2:4085
+710:0:4816
+711:1:144
+712:0:4816
+713:2:4086
+714:0:4816
+715:2:4087
+716:0:4816
+717:2:4092
+718:0:4816
+719:2:4093
+720:0:4816
+721:2:4101
+722:2:4102
+723:2:4106
+724:2:4110
+725:2:4111
+726:2:4115
+727:2:4123
+728:2:4124
+729:2:4128
+730:2:4132
+731:2:4133
+732:2:4128
+733:2:4132
+734:2:4133
+735:2:4137
+736:2:4144
+737:2:4151
+738:2:4152
+739:2:4159
+740:2:4164
+741:2:4171
+742:2:4172
+743:2:4171
+744:2:4172
+745:2:4179
+746:2:4183
+747:0:4816
+748:2:3293
+749:2:3976
+750:0:4816
+751:2:3103
+752:0:4816
+753:2:3294
+754:0:4816
+755:2:3103
+756:0:4816
+757:2:3297
+758:2:3298
+759:2:3302
+760:2:3303
+761:2:3311
+762:2:3312
+763:2:3316
+764:2:3317
+765:2:3325
+766:2:3330
+767:2:3334
+768:2:3335
+769:2:3343
+770:2:3344
+771:2:3348
+772:2:3349
+773:2:3343
+774:2:3344
+775:2:3348
+776:2:3349
+777:2:3357
+778:2:3362
+779:2:3363
+780:2:3374
+781:2:3375
+782:2:3376
+783:2:3387
+784:2:3392
+785:2:3393
+786:2:3404
+787:2:3405
+788:2:3406
+789:2:3404
+790:2:3405
+791:2:3406
+792:2:3417
+793:2:3424
+794:0:4816
+795:2:3103
+796:0:4816
+797:2:3428
+798:2:3429
+799:2:3430
+800:2:3442
+801:2:3443
+802:2:3447
+803:2:3448
+804:2:3456
+805:2:3461
+806:2:3465
+807:2:3466
+808:2:3474
+809:2:3475
+810:2:3479
+811:2:3480
+812:2:3474
+813:2:3475
+814:2:3479
+815:2:3480
+816:2:3488
+817:2:3493
+818:2:3494
+819:2:3505
+820:2:3506
+821:2:3507
+822:2:3518
+823:2:3523
+824:2:3524
+825:2:3535
+826:2:3536
+827:2:3537
+828:2:3535
+829:2:3536
+830:2:3537
+831:2:3548
+832:2:3559
+833:2:3560
+834:0:4816
+835:2:3103
+836:0:4816
+837:2:3567
+838:2:3568
+839:2:3572
+840:2:3573
+841:2:3581
+842:2:3582
+843:2:3586
+844:2:3587
+845:2:3595
+846:2:3600
+847:2:3604
+848:2:3605
+849:2:3613
+850:2:3614
+851:2:3618
+852:2:3619
+853:2:3613
+854:2:3614
+855:2:3618
+856:2:3619
+857:2:3627
+858:2:3632
+859:2:3633
+860:2:3644
+861:2:3645
+862:2:3646
+863:2:3657
+864:2:3662
+865:2:3663
+866:2:3674
+867:2:3675
+868:2:3676
+869:2:3674
+870:2:3675
+871:2:3676
+872:2:3687
+873:0:4816
+874:2:3103
+875:0:4816
+876:2:3696
+877:2:3697
+878:2:3701
+879:2:3702
+880:2:3710
+881:2:3711
+882:2:3715
+883:2:3716
+884:2:3724
+885:2:3729
+886:2:3733
+887:2:3734
+888:2:3742
+889:2:3743
+890:2:3747
+891:2:3748
+892:2:3742
+893:2:3743
+894:2:3747
+895:2:3748
+896:2:3756
+897:2:3761
+898:2:3762
+899:2:3773
+900:2:3774
+901:2:3775
+902:2:3786
+903:2:3791
+904:2:3792
+905:2:3803
+906:2:3804
+907:2:3805
+908:2:3803
+909:2:3804
+910:2:3805
+911:2:3816
+912:2:3823
+913:0:4816
+914:2:3103
+915:0:4816
+916:2:3827
+917:2:3828
+918:2:3829
+919:2:3841
+920:2:3842
+921:2:3846
+922:2:3847
+923:2:3855
+924:2:3860
+925:2:3864
+926:2:3865
+927:2:3873
+928:2:3874
+929:2:3878
+930:2:3879
+931:2:3873
+932:2:3874
+933:2:3878
+934:2:3879
+935:2:3887
+936:2:3892
+937:2:3893
+938:2:3904
+939:2:3905
+940:2:3906
+941:2:3917
+942:2:3922
+943:2:3923
+944:2:3934
+945:2:3935
+946:2:3936
+947:2:3934
+948:2:3935
+949:2:3936
+950:2:3947
+951:2:3957
+952:2:3958
+953:0:4816
+954:2:3103
+955:0:4816
+956:2:3964
+957:0:4816
+958:2:4589
+959:2:4590
+960:2:4594
+961:2:4598
+962:2:4599
+963:2:4603
+964:2:4611
+965:2:4612
+966:2:4616
+967:2:4620
+968:2:4621
+969:2:4616
+970:2:4620
+971:2:4621
+972:2:4625
+973:2:4632
+974:2:4639
+975:2:4640
+976:2:4647
+977:2:4652
+978:2:4659
+979:2:4660
+980:2:4659
+981:2:4660
+982:2:4667
+983:2:4671
+984:0:4816
+985:2:4676
+986:0:4816
+987:2:4677
+988:0:4816
+989:2:4678
+990:0:4816
+991:2:4679
+992:0:4816
+993:1:53
+994:0:4816
+995:2:4680
+996:0:4816
+997:1:57
+998:1:58
+999:1:62
+1000:1:66
+1001:1:67
+1002:1:71
+1003:1:79
+1004:1:80
+1005:1:84
+1006:1:88
+1007:1:89
+1008:1:84
+1009:1:88
+1010:1:89
+1011:1:93
+1012:1:100
+1013:1:107
+1014:1:108
+1015:1:115
+1016:1:120
+1017:1:127
+1018:1:128
+1019:1:127
+1020:1:128
+1021:1:135
+1022:1:139
+1023:0:4816
+1024:2:4679
+1025:0:4816
+1026:1:144
+1027:0:4816
+1028:2:4680
+1029:0:4816
+1030:2:4681
+1031:0:4816
+1032:2:4686
+1033:0:4816
+1034:2:4687
+1035:0:4816
+1036:2:4695
+1037:2:4696
+1038:2:4700
+1039:2:4704
+1040:2:4705
+1041:2:4709
+1042:2:4717
+1043:2:4718
+1044:2:4722
+1045:2:4726
+1046:2:4727
+1047:2:4722
+1048:2:4726
+1049:2:4727
+1050:2:4731
+1051:2:4738
+1052:2:4745
+1053:2:4746
+1054:2:4753
+1055:2:4758
+1056:2:4765
+1057:2:4766
+1058:2:4765
+1059:2:4766
+1060:2:4773
+1061:2:4777
+1062:0:4816
+1063:2:3966
+1064:2:3976
+1065:0:4816
+1066:2:3103
+1067:0:4816
+1068:2:3967
+1069:2:3968
+1070:0:4816
+1071:2:3103
+1072:0:4816
+1073:2:3972
+1074:0:4816
+1075:2:3980
+1076:0:4816
+1077:2:3100
+1078:0:4816
+1079:2:3102
+1080:0:4816
+1081:2:3103
+1082:0:4816
+1083:2:3104
+1084:2:3105
+1085:2:3109
+1086:2:3110
+1087:2:3118
+1088:2:3119
+1089:2:3123
+1090:2:3124
+1091:2:3132
+1092:2:3137
+1093:2:3141
+1094:2:3142
+1095:2:3150
+1096:2:3151
+1097:2:3155
+1098:2:3156
+1099:2:3150
+1100:2:3151
+1101:2:3152
+1102:2:3164
+1103:2:3169
+1104:2:3170
+1105:2:3181
+1106:2:3182
+1107:2:3183
+1108:2:3194
+1109:2:3199
+1110:2:3200
+1111:2:3211
+1112:2:3212
+1113:2:3213
+1114:2:3211
+1115:2:3212
+1116:2:3213
+1117:2:3224
+1118:2:3232
+1119:0:4816
+1120:2:3103
+1121:0:4816
+1122:1:145
+1123:0:4816
+1124:1:147
+1125:0:4816
+1126:1:46
+1127:0:4816
+1128:1:153
+1129:1:154
+1130:1:158
+1131:1:159
+1132:1:167
+1133:1:168
+1134:1:172
+1135:1:173
+1136:1:181
+1137:1:186
+1138:1:190
+1139:1:191
+1140:1:199
+1141:1:200
+1142:1:204
+1143:1:205
+1144:1:199
+1145:1:200
+1146:1:204
+1147:1:205
+1148:1:213
+1149:1:218
+1150:1:219
+1151:1:230
+1152:1:231
+1153:1:232
+1154:1:243
+1155:1:248
+1156:1:249
+1157:1:260
+1158:1:261
+1159:1:262
+1160:1:260
+1161:1:261
+1162:1:262
+1163:1:273
+1164:0:4816
+1165:1:42
+1166:0:4816
+1167:1:43
+1168:0:4816
+1169:2:3236
+1170:2:3240
+1171:2:3241
+1172:2:3245
+1173:2:3249
+1174:2:3250
+1175:2:3254
+1176:2:3262
+1177:2:3263
+1178:2:3267
+1179:2:3271
+1180:2:3272
+1181:2:3267
+1182:2:3268
+1183:2:3276
+1184:0:4816
+1185:2:3103
+1186:0:4816
+1187:2:3284
+1188:2:3285
+1189:2:3286
+1190:0:4816
+1191:2:3103
+1192:0:4816
+1193:2:3291
+1194:0:4816
+1195:2:3995
+1196:2:3996
+1197:2:4000
+1198:2:4004
+1199:2:4005
+1200:2:4009
+1201:2:4014
+1202:2:4022
+1203:2:4026
+1204:2:4027
+1205:2:4022
+1206:2:4026
+1207:2:4027
+1208:2:4031
+1209:2:4038
+1210:2:4045
+1211:2:4046
+1212:2:4053
+1213:2:4058
+1214:2:4065
+1215:2:4066
+1216:2:4065
+1217:2:4066
+1218:2:4073
+1219:2:4077
+1220:0:4816
+1221:2:4082
+1222:0:4816
+1223:2:4083
+1224:0:4816
+1225:2:4084
+1226:0:4816
+1227:2:4085
+1228:0:4816
+1229:1:44
+1230:0:4816
+1231:2:4086
+1232:0:4816
+1233:1:145
+1234:0:4816
+1235:1:147
+1236:0:4816
+1237:2:4085
+1238:0:4816
+1239:1:46
+1240:0:4816
+1241:2:4086
+1242:0:4816
+1243:1:282
+1244:1:283
+1245:0:4816
+1246:1:42
+1247:0:4816
+1248:1:43
+1249:0:4816
+1250:2:4085
+1251:0:4816
+1252:1:44
+1253:0:4816
+1254:2:4086
+1255:0:4816
+1256:1:145
+1257:0:4816
+1258:1:147
+1259:0:4816
+1260:2:4085
+1261:0:4816
+1262:1:46
+1263:0:4816
+1264:2:4086
+1265:0:4816
+1266:1:289
+1267:1:290
+1268:1:294
+1269:1:295
+1270:1:303
+1271:1:304
+1272:1:308
+1273:1:309
+1274:1:317
+1275:1:322
+1276:1:326
+1277:1:327
+1278:1:335
+1279:1:336
+1280:1:340
+1281:1:341
+1282:1:335
+1283:1:336
+1284:1:340
+1285:1:341
+1286:1:349
+1287:1:354
+1288:1:355
+1289:1:366
+1290:1:367
+1291:1:368
+1292:1:379
+1293:1:384
+1294:1:385
+1295:1:396
+1296:1:397
+1297:1:398
+1298:1:396
+1299:1:404
+1300:1:405
+1301:1:409
+1302:0:4816
+1303:1:42
+1304:0:4816
+1305:1:43
+1306:0:4816
+1307:2:4085
+1308:0:4816
+1309:1:44
+1310:0:4816
+1311:2:4086
+1312:0:4816
+1313:1:145
+1314:0:4816
+1315:1:147
+1316:0:4816
+1317:2:4085
+1318:0:4816
+1319:1:46
+1320:0:4816
+1321:2:4086
+1322:0:4816
+1323:1:418
+1324:1:419
+1325:1:423
+1326:1:424
+1327:1:432
+1328:1:433
+1329:1:437
+1330:1:438
+1331:1:446
+1332:1:451
+1333:1:455
+1334:1:456
+1335:1:464
+1336:1:465
+1337:1:469
+1338:1:470
+1339:1:464
+1340:1:465
+1341:1:469
+1342:1:470
+1343:1:478
+1344:1:483
+1345:1:484
+1346:1:495
+1347:1:496
+1348:1:497
+1349:1:508
+1350:1:513
+1351:1:514
+1352:1:525
+1353:1:526
+1354:1:527
+1355:1:525
+1356:1:533
+1357:1:534
+1358:1:538
+1359:1:545
+1360:0:4816
+1361:1:42
+1362:0:4816
+1363:1:43
+1364:0:4816
+1365:2:4085
+1366:0:4816
+1367:1:44
+1368:0:4816
+1369:2:4086
+1370:0:4816
+1371:1:145
+1372:0:4816
+1373:1:147
+1374:0:4816
+1375:2:4085
+1376:0:4816
+1377:1:46
+1378:0:4816
+1379:2:4086
+1380:0:4816
+1381:1:683
+1382:1:684
+1383:1:688
+1384:1:689
+1385:1:697
+1386:1:698
+1387:1:699
+1388:1:711
+1389:1:716
+1390:1:720
+1391:1:721
+1392:1:729
+1393:1:730
+1394:1:734
+1395:1:735
+1396:1:729
+1397:1:730
+1398:1:734
+1399:1:735
+1400:1:743
+1401:1:748
+1402:1:749
+1403:1:760
+1404:1:761
+1405:1:762
+1406:1:773
+1407:1:778
+1408:1:779
+1409:1:790
+1410:1:791
+1411:1:792
+1412:1:790
+1413:1:798
+1414:1:799
+1415:1:803
+1416:0:4816
+1417:1:42
+1418:0:4816
+1419:1:43
+1420:0:4816
+1421:2:4085
+1422:0:4816
+1423:1:44
+1424:0:4816
+1425:2:4086
+1426:0:4816
+1427:1:145
+1428:0:4816
+1429:1:147
+1430:0:4816
+1431:2:4085
+1432:0:4816
+1433:1:46
+1434:0:4816
+1435:2:4086
+1436:0:4816
+1437:1:812
+1438:1:815
+1439:1:816
+1440:0:4816
+1441:1:42
+1442:0:4816
+1443:1:43
+1444:0:4816
+1445:2:4085
+1446:0:4816
+1447:1:44
+1448:0:4816
+1449:2:4086
+1450:0:4816
+1451:1:145
+1452:0:4816
+1453:1:147
+1454:0:4816
+1455:2:4085
+1456:0:4816
+1457:1:46
+1458:0:4816
+1459:2:4086
+1460:0:4816
+1461:1:819
+1462:1:820
+1463:1:824
+1464:1:825
+1465:1:833
+1466:1:834
+1467:1:838
+1468:1:839
+1469:1:847
+1470:1:852
+1471:1:856
+1472:1:857
+1473:1:865
+1474:1:866
+1475:1:870
+1476:1:871
+1477:1:865
+1478:1:866
+1479:1:870
+1480:1:871
+1481:1:879
+1482:1:884
+1483:1:885
+1484:1:896
+1485:1:897
+1486:1:898
+1487:1:909
+1488:1:914
+1489:1:915
+1490:1:926
+1491:1:927
+1492:1:928
+1493:1:926
+1494:1:934
+1495:1:935
+1496:1:939
+1497:0:4816
+1498:1:42
+1499:0:4816
+1500:1:43
+1501:0:4816
+1502:2:4085
+1503:0:4816
+1504:1:44
+1505:0:4816
+1506:2:4086
+1507:0:4816
+1508:1:145
+1509:0:4816
+1510:1:147
+1511:0:4816
+1512:2:4085
+1513:0:4816
+1514:1:46
+1515:0:4816
+1516:2:4086
+1517:0:4816
+1518:1:1079
+1519:1:1080
+1520:1:1084
+1521:1:1085
+1522:1:1093
+1523:1:1094
+1524:1:1098
+1525:1:1099
+1526:1:1107
+1527:1:1112
+1528:1:1116
+1529:1:1117
+1530:1:1125
+1531:1:1126
+1532:1:1130
+1533:1:1131
+1534:1:1125
+1535:1:1126
+1536:1:1130
+1537:1:1131
+1538:1:1139
+1539:1:1144
+1540:1:1145
+1541:1:1156
+1542:1:1157
+1543:1:1158
+1544:1:1169
+1545:1:1174
+1546:1:1175
+1547:1:1186
+1548:1:1187
+1549:1:1188
+1550:1:1186
+1551:1:1194
+1552:1:1195
+1553:1:1199
+1554:1:1206
+1555:1:1210
+1556:0:4816
+1557:1:42
+1558:0:4816
+1559:1:43
+1560:0:4816
+1561:2:4085
+1562:0:4816
+1563:1:44
+1564:0:4816
+1565:2:4086
+1566:0:4816
+1567:1:145
+1568:0:4816
+1569:1:147
+1570:0:4816
+1571:2:4085
+1572:0:4816
+1573:1:46
+1574:0:4816
+1575:2:4086
+1576:0:4816
+1577:1:1211
+1578:1:1212
+1579:1:1216
+1580:1:1217
+1581:1:1225
+1582:1:1226
+1583:1:1227
+1584:1:1239
+1585:1:1244
+1586:1:1248
+1587:1:1249
+1588:1:1257
+1589:1:1258
+1590:1:1262
+1591:1:1263
+1592:1:1257
+1593:1:1258
+1594:1:1262
+1595:1:1263
+1596:1:1271
+1597:1:1276
+1598:1:1277
+1599:1:1288
+1600:1:1289
+1601:1:1290
+1602:1:1301
+1603:1:1306
+1604:1:1307
+1605:1:1318
+1606:1:1319
+1607:1:1320
+1608:1:1318
+1609:1:1326
+1610:1:1327
+1611:1:1331
+1612:0:4816
+1613:1:42
+1614:0:4816
+1615:1:43
+1616:0:4816
+1617:2:4085
+1618:0:4816
+1619:1:44
+1620:0:4816
+1621:2:4086
+1622:0:4816
+1623:1:145
+1624:0:4816
+1625:1:147
+1626:0:4816
+1627:2:4085
+1628:0:4816
+1629:1:46
+1630:0:4816
+1631:2:4086
+1632:0:4816
+1633:1:1340
+1634:0:4816
+1635:2:4085
+1636:0:4816
+1637:1:2804
+1638:1:2808
+1639:1:2809
+1640:1:2817
+1641:1:2818
+1642:1:2822
+1643:1:2823
+1644:1:2831
+1645:1:2836
+1646:1:2840
+1647:1:2841
+1648:1:2849
+1649:1:2850
+1650:1:2854
+1651:1:2855
+1652:1:2849
+1653:1:2850
+1654:1:2854
+1655:1:2855
+1656:1:2863
+1657:1:2868
+1658:1:2869
+1659:1:2880
+1660:1:2881
+1661:1:2882
+1662:1:2893
+1663:1:2898
+1664:1:2899
+1665:1:2910
+1666:1:2911
+1667:1:2912
+1668:1:2910
+1669:1:2918
+1670:1:2919
+1671:1:2923
+1672:1:2927
+1673:0:4816
+1674:2:4086
+1675:0:4816
+1676:1:1342
+1677:1:1343
+1678:0:4814
+1679:1:42
+1680:0:4820
+1681:1:2421
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.log b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.log
new file mode 100644 (file)
index 0000000..7da250d
--- /dev/null
@@ -0,0 +1,580 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_wmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+Depth=    8002 States=    1e+06 Transitions= 1.77e+08 Memory=   513.029        t=    264 R=   4e+03
+Depth=    9746 States=    2e+06 Transitions= 3.71e+08 Memory=   559.318        t=    570 R=   4e+03
+Depth=    9746 States=    3e+06 Transitions=  5.8e+08 Memory=   605.901        t=    916 R=   3e+03
+pan: resizing hashtable to -w22..  done
+Depth=    9746 States=    4e+06 Transitions=  7.6e+08 Memory=   682.920        t= 1.19e+03 R=   3e+03
+Depth=    9746 States=    5e+06 Transitions= 9.44e+08 Memory=   728.721        t= 1.47e+03 R=   3e+03
+Depth=    9746 States=    6e+06 Transitions= 1.35e+09 Memory=   775.303        t= 2.12e+03 R=   3e+03
+Depth=    9746 States=    7e+06 Transitions= 1.79e+09 Memory=   821.885        t= 2.84e+03 R=   2e+03
+Depth=    9746 States=    8e+06 Transitions= 2.11e+09 Memory=   868.076        t= 3.35e+03 R=   2e+03
+Depth=    9746 States=    9e+06 Transitions= 2.49e+09 Memory=   914.658        t= 3.99e+03 R=   2e+03
+pan: resizing hashtable to -w24..  done
+Depth=    9746 States=    1e+07 Transitions= 2.83e+09 Memory=  1085.529        t= 4.5e+03 R=   2e+03
+Depth=    9746 States=  1.1e+07 Transitions=  3.2e+09 Memory=  1132.795        t= 5.06e+03 R=   2e+03
+Depth=    9746 States=  1.2e+07 Transitions= 3.59e+09 Memory=  1179.670        t= 5.66e+03 R=   2e+03
+Depth=    9746 States=  1.3e+07 Transitions= 3.78e+09 Memory=  1226.447        t= 5.95e+03 R=   2e+03
+Depth=    9746 States=  1.4e+07 Transitions= 3.96e+09 Memory=  1272.346        t= 6.23e+03 R=   2e+03
+Depth=    9746 States=  1.5e+07 Transitions= 4.17e+09 Memory=  1318.440        t= 6.56e+03 R=   2e+03
+Depth=    9746 States=  1.6e+07 Transitions= 4.37e+09 Memory=  1364.338        t= 6.86e+03 R=   2e+03
+Depth=    9746 States=  1.7e+07 Transitions= 4.55e+09 Memory=  1410.139        t= 7.14e+03 R=   2e+03
+Depth=    9746 States=  1.8e+07 Transitions=  4.8e+09 Memory=  1456.135        t= 7.53e+03 R=   2e+03
+Depth=    9746 States=  1.9e+07 Transitions=  5.2e+09 Memory=  1502.326        t= 8.17e+03 R=   2e+03
+Depth=    9746 States=    2e+07 Transitions= 5.67e+09 Memory=  1548.615        t= 8.94e+03 R=   2e+03
+Depth=    9746 States=  2.1e+07 Transitions= 6.01e+09 Memory=  1594.611        t= 9.49e+03 R=   2e+03
+Depth=    9746 States=  2.2e+07 Transitions= 6.42e+09 Memory=  1640.998        t= 1.01e+04 R=   2e+03
+Depth=    9746 States=  2.3e+07 Transitions= 6.67e+09 Memory=  1687.580        t= 1.05e+04 R=   2e+03
+Depth=    9746 States=  2.4e+07 Transitions= 6.96e+09 Memory=  1733.674        t= 1.1e+04 R=   2e+03
+Depth=    9746 States=  2.5e+07 Transitions=  7.2e+09 Memory=  1779.865        t= 1.14e+04 R=   2e+03
+Depth=    9746 States=  2.6e+07 Transitions= 7.61e+09 Memory=  1826.154        t= 1.2e+04 R=   2e+03
+Depth=    9746 States=  2.7e+07 Transitions= 8.36e+09 Memory=  1872.248        t= 1.32e+04 R=   2e+03
+Depth=    9746 States=  2.8e+07 Transitions= 9.08e+09 Memory=  1918.733        t= 1.44e+04 R=   2e+03
+Depth=    9746 States=  2.9e+07 Transitions= 9.48e+09 Memory=  1965.608        t= 1.51e+04 R=   2e+03
+Depth=    9746 States=    3e+07 Transitions= 9.94e+09 Memory=  2012.483        t= 1.58e+04 R=   2e+03
+Depth=    9746 States=  3.1e+07 Transitions= 1.02e+10 Memory=  2058.576        t= 1.62e+04 R=   2e+03
+Depth=    9746 States=  3.2e+07 Transitions= 1.05e+10 Memory=  2104.475        t= 1.67e+04 R=   2e+03
+Depth=    9746 States=  3.3e+07 Transitions= 1.08e+10 Memory=  2150.373        t= 1.72e+04 R=   2e+03
+Depth=    9746 States=  3.4e+07 Transitions= 1.15e+10 Memory=  2196.369        t= 1.84e+04 R=   2e+03
+pan: resizing hashtable to -w26..  done
+Depth=    9746 States=  3.5e+07 Transitions= 1.22e+10 Memory=  2738.350        t= 1.96e+04 R=   2e+03
+Depth=    9746 States=  3.6e+07 Transitions= 1.27e+10 Memory=  2784.639        t= 2.03e+04 R=   2e+03
+Depth=    9746 States=  3.7e+07 Transitions= 1.29e+10 Memory=  2831.026        t= 2.07e+04 R=   2e+03
+Depth=    9746 States=  3.8e+07 Transitions= 1.34e+10 Memory=  2877.510        t= 2.14e+04 R=   2e+03
+Depth=    9746 States=  3.9e+07 Transitions= 1.39e+10 Memory=  2923.701        t= 2.21e+04 R=   2e+03
+Depth=    9746 States=    4e+07 Transitions= 1.44e+10 Memory=  2969.697        t= 2.29e+04 R=   2e+03
+Depth=    9746 States=  4.1e+07 Transitions= 1.48e+10 Memory=  3015.889        t= 2.36e+04 R=   2e+03
+Depth=    9746 States=  4.2e+07 Transitions= 1.52e+10 Memory=  3061.787        t= 2.42e+04 R=   2e+03
+Depth=    9746 States=  4.3e+07 Transitions= 1.57e+10 Memory=  3107.979        t= 2.5e+04 R=   2e+03
+Depth=    9746 States=  4.4e+07 Transitions= 1.61e+10 Memory=  3153.779        t= 2.56e+04 R=   2e+03
+Depth=    9746 States=  4.5e+07 Transitions= 1.64e+10 Memory=  3199.873        t= 2.6e+04 R=   2e+03
+Depth=    9746 States=  4.6e+07 Transitions= 1.68e+10 Memory=  3245.967        t= 2.66e+04 R=   2e+03
+Depth=    9746 States=  4.7e+07 Transitions=  1.7e+10 Memory=  3291.865        t= 2.7e+04 R=   2e+03
+Depth=    9746 States=  4.8e+07 Transitions= 1.74e+10 Memory=  3338.057        t= 2.75e+04 R=   2e+03
+Depth=    9746 States=  4.9e+07 Transitions= 1.79e+10 Memory=  3384.151        t= 2.83e+04 R=   2e+03
+Depth=    9868 States=    5e+07 Transitions= 1.82e+10 Memory=  3429.951        t= 2.88e+04 R=   2e+03
+Depth=    9912 States=  5.1e+07 Transitions= 1.86e+10 Memory=  3475.752        t= 2.94e+04 R=   2e+03
+Depth=    9912 States=  5.2e+07 Transitions= 1.89e+10 Memory=  3521.553        t= 2.99e+04 R=   2e+03
+Depth=    9912 States=  5.3e+07 Transitions= 1.93e+10 Memory=  3567.256        t= 3.05e+04 R=   2e+03
+Depth=    9912 States=  5.4e+07 Transitions= 1.96e+10 Memory=  3613.154        t= 3.1e+04 R=   2e+03
+Depth=    9912 States=  5.5e+07 Transitions=    2e+10 Memory=  3659.053        t= 3.16e+04 R=   2e+03
+Depth=    9912 States=  5.6e+07 Transitions= 2.03e+10 Memory=  3704.756        t= 3.2e+04 R=   2e+03
+Depth=    9912 States=  5.7e+07 Transitions= 2.06e+10 Memory=  3750.557        t= 3.25e+04 R=   2e+03
+Depth=    9912 States=  5.8e+07 Transitions=  2.1e+10 Memory=  3796.455        t= 3.31e+04 R=   2e+03
+Depth=    9912 States=  5.9e+07 Transitions= 2.13e+10 Memory=  3842.256        t= 3.37e+04 R=   2e+03
+Depth=    9912 States=    6e+07 Transitions= 2.16e+10 Memory=  3888.057        t= 3.42e+04 R=   2e+03
+Depth=    9912 States=  6.1e+07 Transitions=  2.2e+10 Memory=  3933.858        t= 3.47e+04 R=   2e+03
+Depth=    9912 States=  6.2e+07 Transitions= 2.23e+10 Memory=  3979.658        t= 3.52e+04 R=   2e+03
+Depth=    9912 States=  6.3e+07 Transitions= 2.27e+10 Memory=  4025.459        t= 3.58e+04 R=   2e+03
+Depth=    9912 States=  6.4e+07 Transitions=  2.3e+10 Memory=  4071.260        t= 3.63e+04 R=   2e+03
+Depth=    9912 States=  6.5e+07 Transitions= 2.33e+10 Memory=  4116.963        t= 3.69e+04 R=   2e+03
+Depth=    9912 States=  6.6e+07 Transitions= 2.36e+10 Memory=  4162.764        t= 3.73e+04 R=   2e+03
+Depth=    9912 States=  6.7e+07 Transitions= 2.39e+10 Memory=  4208.565        t= 3.78e+04 R=   2e+03
+Depth=    9912 States=  6.8e+07 Transitions= 2.42e+10 Memory=  4254.463        t= 3.83e+04 R=   2e+03
+Depth=    9912 States=  6.9e+07 Transitions= 2.46e+10 Memory=  4300.361        t= 3.88e+04 R=   2e+03
+Depth=    9912 States=    7e+07 Transitions=  2.5e+10 Memory=  4346.553        t= 3.94e+04 R=   2e+03
+Depth=    9912 States=  7.1e+07 Transitions= 2.53e+10 Memory=  4392.354        t=  4e+04 R=   2e+03
+Depth=    9912 States=  7.2e+07 Transitions= 2.58e+10 Memory=  4438.252        t= 4.07e+04 R=   2e+03
+Depth=    9912 States=  7.3e+07 Transitions= 2.61e+10 Memory=  4484.151        t= 4.12e+04 R=   2e+03
+Depth=    9912 States=  7.4e+07 Transitions= 2.66e+10 Memory=  4530.635        t= 4.19e+04 R=   2e+03
+Depth=    9912 States=  7.5e+07 Transitions= 2.69e+10 Memory=  4577.315        t= 4.25e+04 R=   2e+03
+Depth=    9912 States=  7.6e+07 Transitions= 2.74e+10 Memory=  4623.897        t= 4.32e+04 R=   2e+03
+Depth=    9912 States=  7.7e+07 Transitions= 2.78e+10 Memory=  4669.893        t= 4.39e+04 R=   2e+03
+Depth=    9912 States=  7.8e+07 Transitions= 2.82e+10 Memory=  4716.084        t= 4.45e+04 R=   2e+03
+Depth=    9912 States=  7.9e+07 Transitions= 2.85e+10 Memory=  4762.568        t= 4.5e+04 R=   2e+03
+Depth=    9912 States=    8e+07 Transitions= 2.87e+10 Memory=  4808.369        t= 4.52e+04 R=   2e+03
+Depth=    9912 States=  8.1e+07 Transitions= 2.89e+10 Memory=  4854.170        t= 4.56e+04 R=   2e+03
+Depth=    9912 States=  8.2e+07 Transitions= 2.91e+10 Memory=  4899.971        t= 4.59e+04 R=   2e+03
+Depth=    9912 States=  8.3e+07 Transitions= 2.93e+10 Memory=  4945.772        t= 4.61e+04 R=   2e+03
+Depth=    9912 States=  8.4e+07 Transitions= 2.95e+10 Memory=  4991.475        t= 4.64e+04 R=   2e+03
+Depth=    9912 States=  8.5e+07 Transitions= 2.99e+10 Memory=  5037.276        t= 4.71e+04 R=   2e+03
+Depth=    9912 States=  8.6e+07 Transitions= 3.03e+10 Memory=  5083.076        t= 4.78e+04 R=   2e+03
+Depth=    9912 States=  8.7e+07 Transitions= 3.07e+10 Memory=  5128.877        t= 4.83e+04 R=   2e+03
+pan: claim violated! (at depth 1530)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 9912, errors: 1
+ 87915830 states, stored
+3.0918075e+10 states, matched
+3.100599e+10 transitions (= stored+matched)
+1.742054e+11 atomic steps
+hash conflicts: 1.5948958e+10 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 9725.796      equivalent memory usage for states (stored*(State-vector + overhead))
+ 4202.002      actual memory usage for states (compression: 43.20%)
+               state-vector as stored = 14 byte + 36 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+ 5170.869      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 517357 4332 4682 2 2 ]
+unreached in proctype urcu_reader
+       line 894, "pan.___", state 12, "((i<1))"
+       line 894, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 407, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 223, "(1)"
+       line 434, "pan.___", state 253, "(1)"
+       line 438, "pan.___", state 266, "(1)"
+       line 687, "pan.___", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 326, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 359, "(1)"
+       line 434, "pan.___", state 389, "(1)"
+       line 438, "pan.___", state 402, "(1)"
+       line 407, "pan.___", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 455, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 488, "(1)"
+       line 434, "pan.___", state 518, "(1)"
+       line 438, "pan.___", state 531, "(1)"
+       line 407, "pan.___", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 556, "(1)"
+       line 407, "pan.___", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 557, "else"
+       line 407, "pan.___", state 560, "(1)"
+       line 411, "pan.___", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 570, "(1)"
+       line 411, "pan.___", state 571, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 571, "else"
+       line 411, "pan.___", state 574, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 409, "pan.___", state 580, "((i<1))"
+       line 409, "pan.___", state 580, "((i>=1))"
+       line 416, "pan.___", state 586, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 588, "(1)"
+       line 416, "pan.___", state 589, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 589, "else"
+       line 416, "pan.___", state 592, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 420, "pan.___", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 602, "(1)"
+       line 420, "pan.___", state 603, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 603, "else"
+       line 420, "pan.___", state 606, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 418, "pan.___", state 612, "((i<2))"
+       line 418, "pan.___", state 612, "((i>=2))"
+       line 425, "pan.___", state 619, "(1)"
+       line 425, "pan.___", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 620, "else"
+       line 425, "pan.___", state 623, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 429, "pan.___", state 632, "(1)"
+       line 429, "pan.___", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 633, "else"
+       line 429, "pan.___", state 636, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 427, "pan.___", state 642, "((i<1))"
+       line 427, "pan.___", state 642, "((i>=1))"
+       line 434, "pan.___", state 649, "(1)"
+       line 434, "pan.___", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 650, "else"
+       line 434, "pan.___", state 653, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 438, "pan.___", state 662, "(1)"
+       line 438, "pan.___", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 663, "else"
+       line 438, "pan.___", state 666, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 436, "pan.___", state 672, "((i<2))"
+       line 436, "pan.___", state 672, "((i>=2))"
+       line 446, "pan.___", state 676, "(1)"
+       line 446, "pan.___", state 676, "(1)"
+       line 687, "pan.___", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 687, "pan.___", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 687, "pan.___", state 681, "(1)"
+       line 407, "pan.___", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 720, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 753, "(1)"
+       line 434, "pan.___", state 783, "(1)"
+       line 438, "pan.___", state 796, "(1)"
+       line 407, "pan.___", state 824, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 856, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 870, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 889, "(1)"
+       line 434, "pan.___", state 919, "(1)"
+       line 438, "pan.___", state 932, "(1)"
+       line 407, "pan.___", state 953, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 955, "(1)"
+       line 407, "pan.___", state 956, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 956, "else"
+       line 407, "pan.___", state 959, "(1)"
+       line 411, "pan.___", state 967, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 969, "(1)"
+       line 411, "pan.___", state 970, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 970, "else"
+       line 411, "pan.___", state 973, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 409, "pan.___", state 979, "((i<1))"
+       line 409, "pan.___", state 979, "((i>=1))"
+       line 416, "pan.___", state 985, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 987, "(1)"
+       line 416, "pan.___", state 988, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 988, "else"
+       line 416, "pan.___", state 991, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 420, "pan.___", state 999, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 1001, "(1)"
+       line 420, "pan.___", state 1002, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 1002, "else"
+       line 420, "pan.___", state 1005, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 418, "pan.___", state 1011, "((i<2))"
+       line 418, "pan.___", state 1011, "((i>=2))"
+       line 425, "pan.___", state 1018, "(1)"
+       line 425, "pan.___", state 1019, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 1019, "else"
+       line 425, "pan.___", state 1022, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 429, "pan.___", state 1031, "(1)"
+       line 429, "pan.___", state 1032, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 1032, "else"
+       line 429, "pan.___", state 1035, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 427, "pan.___", state 1041, "((i<1))"
+       line 427, "pan.___", state 1041, "((i>=1))"
+       line 434, "pan.___", state 1048, "(1)"
+       line 434, "pan.___", state 1049, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 1049, "else"
+       line 434, "pan.___", state 1052, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 438, "pan.___", state 1061, "(1)"
+       line 438, "pan.___", state 1062, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 1062, "else"
+       line 438, "pan.___", state 1065, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 436, "pan.___", state 1071, "((i<2))"
+       line 436, "pan.___", state 1071, "((i>=2))"
+       line 446, "pan.___", state 1075, "(1)"
+       line 446, "pan.___", state 1075, "(1)"
+       line 695, "pan.___", state 1079, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 1084, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1116, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1130, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1149, "(1)"
+       line 434, "pan.___", state 1179, "(1)"
+       line 438, "pan.___", state 1192, "(1)"
+       line 407, "pan.___", state 1216, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1248, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1262, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1281, "(1)"
+       line 434, "pan.___", state 1311, "(1)"
+       line 438, "pan.___", state 1324, "(1)"
+       line 407, "pan.___", state 1349, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1381, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1395, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1414, "(1)"
+       line 434, "pan.___", state 1444, "(1)"
+       line 438, "pan.___", state 1457, "(1)"
+       line 407, "pan.___", state 1478, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1510, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1524, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1543, "(1)"
+       line 434, "pan.___", state 1573, "(1)"
+       line 438, "pan.___", state 1586, "(1)"
+       line 407, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1644, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1658, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1677, "(1)"
+       line 434, "pan.___", state 1707, "(1)"
+       line 438, "pan.___", state 1720, "(1)"
+       line 407, "pan.___", state 1741, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1773, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1787, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1806, "(1)"
+       line 434, "pan.___", state 1836, "(1)"
+       line 438, "pan.___", state 1849, "(1)"
+       line 407, "pan.___", state 1873, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1905, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1919, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1938, "(1)"
+       line 434, "pan.___", state 1968, "(1)"
+       line 438, "pan.___", state 1981, "(1)"
+       line 734, "pan.___", state 2002, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 2009, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2041, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2055, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2074, "(1)"
+       line 434, "pan.___", state 2104, "(1)"
+       line 438, "pan.___", state 2117, "(1)"
+       line 407, "pan.___", state 2138, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2170, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2184, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2203, "(1)"
+       line 434, "pan.___", state 2233, "(1)"
+       line 438, "pan.___", state 2246, "(1)"
+       line 407, "pan.___", state 2269, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 2271, "(1)"
+       line 407, "pan.___", state 2272, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 2272, "else"
+       line 407, "pan.___", state 2275, "(1)"
+       line 411, "pan.___", state 2283, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2286, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 2286, "else"
+       line 411, "pan.___", state 2289, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 409, "pan.___", state 2295, "((i<1))"
+       line 409, "pan.___", state 2295, "((i>=1))"
+       line 416, "pan.___", state 2301, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2303, "(1)"
+       line 416, "pan.___", state 2304, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2304, "else"
+       line 416, "pan.___", state 2307, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 420, "pan.___", state 2315, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2317, "(1)"
+       line 420, "pan.___", state 2318, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2318, "else"
+       line 420, "pan.___", state 2321, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 418, "pan.___", state 2327, "((i<2))"
+       line 418, "pan.___", state 2327, "((i>=2))"
+       line 425, "pan.___", state 2334, "(1)"
+       line 425, "pan.___", state 2335, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 2335, "else"
+       line 425, "pan.___", state 2338, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 429, "pan.___", state 2347, "(1)"
+       line 429, "pan.___", state 2348, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 2348, "else"
+       line 429, "pan.___", state 2351, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 427, "pan.___", state 2357, "((i<1))"
+       line 427, "pan.___", state 2357, "((i>=1))"
+       line 434, "pan.___", state 2364, "(1)"
+       line 434, "pan.___", state 2365, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 2365, "else"
+       line 434, "pan.___", state 2368, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 438, "pan.___", state 2377, "(1)"
+       line 438, "pan.___", state 2378, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 2378, "else"
+       line 438, "pan.___", state 2381, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 436, "pan.___", state 2387, "((i<2))"
+       line 436, "pan.___", state 2387, "((i>=2))"
+       line 446, "pan.___", state 2391, "(1)"
+       line 446, "pan.___", state 2391, "(1)"
+       line 734, "pan.___", state 2394, "cached_urcu_active_readers = (tmp+1)"
+       line 734, "pan.___", state 2395, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 734, "pan.___", state 2396, "(1)"
+       line 407, "pan.___", state 2403, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2435, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2449, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2468, "(1)"
+       line 434, "pan.___", state 2498, "(1)"
+       line 438, "pan.___", state 2511, "(1)"
+       line 407, "pan.___", state 2538, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2570, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2584, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2603, "(1)"
+       line 434, "pan.___", state 2633, "(1)"
+       line 438, "pan.___", state 2646, "(1)"
+       line 407, "pan.___", state 2667, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2699, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2713, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2732, "(1)"
+       line 434, "pan.___", state 2762, "(1)"
+       line 438, "pan.___", state 2775, "(1)"
+       line 245, "pan.___", state 2808, "(1)"
+       line 253, "pan.___", state 2828, "(1)"
+       line 257, "pan.___", state 2836, "(1)"
+       line 245, "pan.___", state 2851, "(1)"
+       line 253, "pan.___", state 2871, "(1)"
+       line 257, "pan.___", state 2879, "(1)"
+       line 929, "pan.___", state 2896, "-end-"
+       (246 of 2896 states)
+unreached in proctype urcu_writer
+       line 1018, "pan.___", state 12, "((i<1))"
+       line 1018, "pan.___", state 12, "((i>=1))"
+       line 407, "pan.___", state 46, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 60, "cache_dirty_urcu_active_readers = 0"
+       line 425, "pan.___", state 111, "(1)"
+       line 429, "pan.___", state 124, "(1)"
+       line 268, "pan.___", state 177, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 179, "(1)"
+       line 272, "pan.___", state 186, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 188, "(1)"
+       line 272, "pan.___", state 189, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 189, "else"
+       line 270, "pan.___", state 194, "((i<1))"
+       line 270, "pan.___", state 194, "((i>=1))"
+       line 276, "pan.___", state 199, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 201, "(1)"
+       line 276, "pan.___", state 202, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 202, "else"
+       line 280, "pan.___", state 208, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 210, "(1)"
+       line 280, "pan.___", state 211, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 211, "else"
+       line 285, "pan.___", state 220, "(cache_dirty_urcu_gp_ctr)"
+       line 285, "pan.___", state 220, "else"
+       line 407, "pan.___", state 239, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 253, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 271, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 285, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 304, "(1)"
+       line 429, "pan.___", state 317, "(1)"
+       line 434, "pan.___", state 334, "(1)"
+       line 438, "pan.___", state 347, "(1)"
+       line 411, "pan.___", state 384, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 402, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 416, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 448, "(1)"
+       line 434, "pan.___", state 465, "(1)"
+       line 438, "pan.___", state 478, "(1)"
+       line 411, "pan.___", state 523, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 541, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 555, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 587, "(1)"
+       line 434, "pan.___", state 604, "(1)"
+       line 438, "pan.___", state 617, "(1)"
+       line 411, "pan.___", state 652, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 670, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 684, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 716, "(1)"
+       line 434, "pan.___", state 733, "(1)"
+       line 438, "pan.___", state 746, "(1)"
+       line 411, "pan.___", state 783, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 801, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 815, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 847, "(1)"
+       line 434, "pan.___", state 864, "(1)"
+       line 438, "pan.___", state 877, "(1)"
+       line 268, "pan.___", state 932, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 941, "cache_dirty_urcu_active_readers = 0"
+       line 245, "pan.___", state 979, "(1)"
+       line 249, "pan.___", state 987, "(1)"
+       line 253, "pan.___", state 999, "(1)"
+       line 257, "pan.___", state 1007, "(1)"
+       line 268, "pan.___", state 1038, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1047, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1060, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1069, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1085, "(1)"
+       line 249, "pan.___", state 1093, "(1)"
+       line 253, "pan.___", state 1105, "(1)"
+       line 257, "pan.___", state 1113, "(1)"
+       line 272, "pan.___", state 1139, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1152, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1161, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1177, "(1)"
+       line 249, "pan.___", state 1185, "(1)"
+       line 253, "pan.___", state 1197, "(1)"
+       line 257, "pan.___", state 1205, "(1)"
+       line 268, "pan.___", state 1236, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1245, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1258, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1267, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1283, "(1)"
+       line 249, "pan.___", state 1291, "(1)"
+       line 253, "pan.___", state 1303, "(1)"
+       line 257, "pan.___", state 1311, "(1)"
+       line 272, "pan.___", state 1337, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1350, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1359, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1375, "(1)"
+       line 249, "pan.___", state 1383, "(1)"
+       line 253, "pan.___", state 1395, "(1)"
+       line 257, "pan.___", state 1403, "(1)"
+       line 268, "pan.___", state 1434, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1443, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1456, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1465, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1481, "(1)"
+       line 249, "pan.___", state 1489, "(1)"
+       line 253, "pan.___", state 1501, "(1)"
+       line 257, "pan.___", state 1509, "(1)"
+       line 272, "pan.___", state 1535, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1548, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1557, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1573, "(1)"
+       line 249, "pan.___", state 1581, "(1)"
+       line 253, "pan.___", state 1593, "(1)"
+       line 257, "pan.___", state 1601, "(1)"
+       line 268, "pan.___", state 1632, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1641, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1654, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1663, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1679, "(1)"
+       line 249, "pan.___", state 1687, "(1)"
+       line 253, "pan.___", state 1699, "(1)"
+       line 257, "pan.___", state 1707, "(1)"
+       line 1304, "pan.___", state 1723, "-end-"
+       (110 of 1723 states)
+unreached in proctype :init:
+       line 1319, "pan.___", state 13, "((i<1))"
+       line 1319, "pan.___", state 13, "((i>=1))"
+       (1 of 28 states)
+unreached in proctype :never:
+       line 1367, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 4.88e+04 seconds
+pan: rate 1799.8673 states/second
+pan: avg transition delay 1.5754e-06 usec
+cp .input.spin urcu_free_no_wmb.spin.input
+cp .input.spin.trail urcu_free_no_wmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.spin.input b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.spin.input
new file mode 100644 (file)
index 0000000..4159c77
--- /dev/null
@@ -0,0 +1,1340 @@
+#define NO_WMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
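+/*
+ * Worked example (illustration only): RCU_GP_CTR_BIT = (1 << 7) = 0x80, so
+ * RCU_GP_CTR_NEST_MASK = 0x7f.  The low 7 bits of urcu_active_readers count
+ * read-side nesting and bit 7 carries the grace-period phase; hence
+ * (tmp & RCU_GP_CTR_NEST_MASK) == 0, as tested in PROCEDURE_READ_LOCK below,
+ * identifies an outermost read lock.
+ */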
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
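+
+/*
+ * Illustrative sketch of the token scheme (token names TOK_A, TOK_B, TOK_C
+ * are hypothetical, not part of this model).  An instruction guarded by
+ * CONSUME_TOKENS(state, TOK_A | TOK_B, TOK_C) may only fire once TOK_A and
+ * TOK_B have both been produced and TOK_C has not been produced yet:
+ *
+ *   state = 0;
+ *   PRODUCE_TOKENS(state, TOK_A);                // state == TOK_A
+ *   PRODUCE_TOKENS(state, TOK_B);                // guard above becomes true
+ *   PRODUCE_TOKENS(state, TOK_C);                // guard is inhibited again
+ *   CLEAR_TOKENS(state, TOK_A | TOK_B | TOK_C);  // state back to 0
+ */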
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
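+
+/*
+ * Small illustrative example of the dependency kinds above (hypothetical
+ * statements, not taken from the model):
+ *
+ *   a = x;       (1)
+ *   b = a + 1;   (2)  RAW on 'a' : (2) reads the value written by (1)
+ *   a = c;       (3)  WAR on 'a' against (2), WAW on 'a' against (1)
+ *
+ * Renaming 'a' in (3) removes the WAR and WAW dependencies, but the RAW
+ * dependency remains, since it reflects an actual flow of data.
+ */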
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Nondeterministically flush a dirty cache entry to memory, or refresh a
+ * clean entry from memory, or do nothing: models random cache updates.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
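+
+/*
+ * Usage sketch for the cache macros above (the variable name 'foo' is
+ * hypothetical): a write lands in the per-process cache and only reaches
+ * memory when flushed, either explicitly or by a random cache update.
+ *
+ *   DECLARE_CACHED_VAR(byte, foo);
+ *   DECLARE_PROC_CACHED_VAR(byte, foo);
+ *   INIT_CACHED_VAR(foo, 0);
+ *   INIT_PROC_CACHED_VAR(foo, 0);
+ *   WRITE_CACHED_VAR(foo, 1);            // cached_foo = 1, dirty
+ *   CACHE_WRITE_TO_MEM(foo, get_pid());  // mem_foo = 1, clean again
+ */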
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
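+
+       /*
+        * The DECLARE_PROC_CACHED_VAR declarations above give this process its
+        * own local view of the shared variables : reads and writes go through
+        * the per-process cache, and the ooo_mem()/memory barrier primitives
+        * defined earlier in this model decide when that cache is synchronized
+        * with memory, modeling weakly-ordered (out-of-order) accesses.
+        */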
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could believe the writer is making
+                * progress when it is actually blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
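+
+/*
+ * Each bit above stands for one step of the writer. In the loop below,
+ * a CONSUME_TOKENS(proc, deps, instr) guard becomes executable once the
+ * "deps" tokens have all been produced and the "instr" token has not been
+ * produced yet; PRODUCE_TOKENS marks a step as executed, and CLEAR_TOKENS
+ * re-enables steps (for the busy-waiting loops and at the end of an
+ * update). The if/fi thus explores every step ordering permitted by the
+ * declared dependencies, modeling out-of-order execution of the writer.
+ */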
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
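+
+               /*
+                * The NO_WMB, NO_MB and SINGLE_FLIP configurations above
+                * pre-produce the corresponding tokens, so the matching steps
+                * are skipped by the dependency graph below. This is how the
+                * error-injection runs (urcu_free_no_wmb, urcu_free_no_mb,
+                * urcu_free_single_flip) remove a barrier or the second
+                * counter flip from the model and let the verifier search for
+                * the resulting failures.
+                */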
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, with weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave init after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_no_wmb.spin.input.trail
new file mode 100644 (file)
index 0000000..f280086
--- /dev/null
@@ -0,0 +1,1533 @@
+-2:3:-2
+-4:-4:-4
+1:0:4649
+2:2:2896
+3:2:2901
+4:2:2905
+5:2:2913
+6:2:2917
+7:2:2921
+8:0:4649
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:4649
+16:3:4619
+17:3:4622
+18:3:4629
+19:3:4636
+20:3:4639
+21:3:4643
+22:3:4644
+23:0:4649
+24:3:4646
+25:0:4649
+26:2:2925
+27:0:4649
+28:2:2931
+29:0:4649
+30:2:2932
+31:0:4649
+32:2:2934
+33:0:4649
+34:2:2935
+35:0:4649
+36:2:2936
+37:0:4649
+38:2:2937
+39:2:2938
+40:2:2942
+41:2:2943
+42:2:2951
+43:2:2952
+44:2:2956
+45:2:2957
+46:2:2965
+47:2:2970
+48:2:2974
+49:2:2975
+50:2:2983
+51:2:2984
+52:2:2988
+53:2:2989
+54:2:2983
+55:2:2984
+56:2:2988
+57:2:2989
+58:2:2997
+59:2:3002
+60:2:3003
+61:2:3014
+62:2:3015
+63:2:3016
+64:2:3027
+65:2:3032
+66:2:3033
+67:2:3044
+68:2:3045
+69:2:3046
+70:2:3044
+71:2:3045
+72:2:3046
+73:2:3057
+74:2:3065
+75:0:4649
+76:2:2936
+77:0:4649
+78:2:3117
+79:2:3118
+80:2:3119
+81:0:4649
+82:2:2936
+83:0:4649
+84:2:3124
+85:0:4649
+86:2:3828
+87:2:3829
+88:2:3833
+89:2:3837
+90:2:3838
+91:2:3842
+92:2:3847
+93:2:3855
+94:2:3859
+95:2:3860
+96:2:3855
+97:2:3856
+98:2:3864
+99:2:3871
+100:2:3878
+101:2:3879
+102:2:3886
+103:2:3891
+104:2:3898
+105:2:3899
+106:2:3898
+107:2:3899
+108:2:3906
+109:2:3910
+110:0:4649
+111:2:3915
+112:0:4649
+113:2:3916
+114:0:4649
+115:2:3917
+116:0:4649
+117:2:3918
+118:0:4649
+119:1:29
+120:0:4649
+121:2:3919
+122:0:4649
+123:1:35
+124:0:4649
+125:1:36
+126:0:4649
+127:2:3918
+128:0:4649
+129:1:37
+130:0:4649
+131:2:3919
+132:0:4649
+133:1:38
+134:0:4649
+135:2:3918
+136:0:4649
+137:1:39
+138:0:4649
+139:2:3919
+140:0:4649
+141:1:40
+142:0:4649
+143:2:3918
+144:0:4649
+145:1:41
+146:0:4649
+147:2:3919
+148:0:4649
+149:1:42
+150:0:4649
+151:1:43
+152:0:4649
+153:2:3918
+154:0:4649
+155:1:44
+156:0:4649
+157:2:3919
+158:0:4649
+159:1:53
+160:0:4649
+161:2:3918
+162:0:4649
+163:1:57
+164:1:58
+165:1:62
+166:1:66
+167:1:67
+168:1:71
+169:1:79
+170:1:80
+171:1:84
+172:1:88
+173:1:89
+174:1:84
+175:1:88
+176:1:89
+177:1:93
+178:1:100
+179:1:107
+180:1:108
+181:1:115
+182:1:120
+183:1:127
+184:1:128
+185:1:127
+186:1:128
+187:1:135
+188:1:139
+189:0:4649
+190:2:3919
+191:0:4649
+192:1:144
+193:0:4649
+194:2:3920
+195:0:4649
+196:2:3925
+197:0:4649
+198:2:3926
+199:0:4649
+200:2:3934
+201:2:3935
+202:2:3939
+203:2:3943
+204:2:3944
+205:2:3948
+206:2:3956
+207:2:3957
+208:2:3961
+209:2:3965
+210:2:3966
+211:2:3961
+212:2:3965
+213:2:3966
+214:2:3970
+215:2:3977
+216:2:3984
+217:2:3985
+218:2:3992
+219:2:3997
+220:2:4004
+221:2:4005
+222:2:4004
+223:2:4005
+224:2:4012
+225:2:4016
+226:0:4649
+227:2:3126
+228:2:3809
+229:0:4649
+230:2:2936
+231:0:4649
+232:2:3127
+233:0:4649
+234:2:2936
+235:0:4649
+236:2:3130
+237:2:3131
+238:2:3135
+239:2:3136
+240:2:3144
+241:2:3145
+242:2:3149
+243:2:3150
+244:2:3158
+245:2:3163
+246:2:3167
+247:2:3168
+248:2:3176
+249:2:3177
+250:2:3181
+251:2:3182
+252:2:3176
+253:2:3177
+254:2:3181
+255:2:3182
+256:2:3190
+257:2:3195
+258:2:3196
+259:2:3207
+260:2:3208
+261:2:3209
+262:2:3220
+263:2:3225
+264:2:3226
+265:2:3237
+266:2:3238
+267:2:3239
+268:2:3237
+269:2:3238
+270:2:3239
+271:2:3250
+272:2:3257
+273:0:4649
+274:2:2936
+275:0:4649
+276:2:3261
+277:2:3262
+278:2:3263
+279:2:3275
+280:2:3276
+281:2:3280
+282:2:3281
+283:2:3289
+284:2:3294
+285:2:3298
+286:2:3299
+287:2:3307
+288:2:3308
+289:2:3312
+290:2:3313
+291:2:3307
+292:2:3308
+293:2:3312
+294:2:3313
+295:2:3321
+296:2:3326
+297:2:3327
+298:2:3338
+299:2:3339
+300:2:3340
+301:2:3351
+302:2:3356
+303:2:3357
+304:2:3368
+305:2:3369
+306:2:3370
+307:2:3368
+308:2:3369
+309:2:3370
+310:2:3381
+311:2:3392
+312:2:3393
+313:0:4649
+314:2:2936
+315:0:4649
+316:2:3400
+317:2:3401
+318:2:3405
+319:2:3406
+320:2:3414
+321:2:3415
+322:2:3419
+323:2:3420
+324:2:3428
+325:2:3433
+326:2:3437
+327:2:3438
+328:2:3446
+329:2:3447
+330:2:3451
+331:2:3452
+332:2:3446
+333:2:3447
+334:2:3451
+335:2:3452
+336:2:3460
+337:2:3465
+338:2:3466
+339:2:3477
+340:2:3478
+341:2:3479
+342:2:3490
+343:2:3495
+344:2:3496
+345:2:3507
+346:2:3508
+347:2:3509
+348:2:3507
+349:2:3508
+350:2:3509
+351:2:3520
+352:0:4649
+353:2:2936
+354:0:4649
+355:2:3529
+356:2:3530
+357:2:3534
+358:2:3535
+359:2:3543
+360:2:3544
+361:2:3548
+362:2:3549
+363:2:3557
+364:2:3562
+365:2:3566
+366:2:3567
+367:2:3575
+368:2:3576
+369:2:3580
+370:2:3581
+371:2:3575
+372:2:3576
+373:2:3580
+374:2:3581
+375:2:3589
+376:2:3594
+377:2:3595
+378:2:3606
+379:2:3607
+380:2:3608
+381:2:3619
+382:2:3624
+383:2:3625
+384:2:3636
+385:2:3637
+386:2:3638
+387:2:3636
+388:2:3637
+389:2:3638
+390:2:3649
+391:2:3656
+392:0:4649
+393:2:2936
+394:0:4649
+395:2:3660
+396:2:3661
+397:2:3662
+398:2:3674
+399:2:3675
+400:2:3679
+401:2:3680
+402:2:3688
+403:2:3693
+404:2:3697
+405:2:3698
+406:2:3706
+407:2:3707
+408:2:3711
+409:2:3712
+410:2:3706
+411:2:3707
+412:2:3711
+413:2:3712
+414:2:3720
+415:2:3725
+416:2:3726
+417:2:3737
+418:2:3738
+419:2:3739
+420:2:3750
+421:2:3755
+422:2:3756
+423:2:3767
+424:2:3768
+425:2:3769
+426:2:3767
+427:2:3768
+428:2:3769
+429:2:3780
+430:2:3790
+431:2:3791
+432:0:4649
+433:2:2936
+434:0:4649
+435:2:3797
+436:0:4649
+437:2:4422
+438:2:4423
+439:2:4427
+440:2:4431
+441:2:4432
+442:2:4436
+443:2:4444
+444:2:4445
+445:2:4449
+446:2:4453
+447:2:4454
+448:2:4449
+449:2:4453
+450:2:4454
+451:2:4458
+452:2:4465
+453:2:4472
+454:2:4473
+455:2:4480
+456:2:4485
+457:2:4492
+458:2:4493
+459:2:4492
+460:2:4493
+461:2:4500
+462:2:4504
+463:0:4649
+464:2:4509
+465:0:4649
+466:2:4510
+467:0:4649
+468:2:4511
+469:0:4649
+470:2:4512
+471:0:4649
+472:1:53
+473:0:4649
+474:2:4513
+475:0:4649
+476:1:57
+477:1:58
+478:1:62
+479:1:66
+480:1:67
+481:1:71
+482:1:79
+483:1:80
+484:1:84
+485:1:88
+486:1:89
+487:1:84
+488:1:88
+489:1:89
+490:1:93
+491:1:100
+492:1:107
+493:1:108
+494:1:115
+495:1:120
+496:1:127
+497:1:128
+498:1:127
+499:1:128
+500:1:135
+501:1:139
+502:0:4649
+503:2:4512
+504:0:4649
+505:1:144
+506:0:4649
+507:2:4513
+508:0:4649
+509:2:4514
+510:0:4649
+511:2:4519
+512:0:4649
+513:2:4520
+514:0:4649
+515:2:4528
+516:2:4529
+517:2:4533
+518:2:4537
+519:2:4538
+520:2:4542
+521:2:4550
+522:2:4551
+523:2:4555
+524:2:4559
+525:2:4560
+526:2:4555
+527:2:4559
+528:2:4560
+529:2:4564
+530:2:4571
+531:2:4578
+532:2:4579
+533:2:4586
+534:2:4591
+535:2:4598
+536:2:4599
+537:2:4598
+538:2:4599
+539:2:4606
+540:2:4610
+541:0:4649
+542:2:3799
+543:2:3809
+544:0:4649
+545:2:2936
+546:0:4649
+547:2:3800
+548:2:3801
+549:0:4649
+550:2:2936
+551:0:4649
+552:2:3805
+553:0:4649
+554:2:3813
+555:0:4649
+556:2:2932
+557:0:4649
+558:2:2934
+559:0:4649
+560:2:2935
+561:0:4649
+562:2:2936
+563:0:4649
+564:2:3117
+565:2:3118
+566:2:3119
+567:0:4649
+568:2:2936
+569:0:4649
+570:2:2937
+571:2:2938
+572:2:2942
+573:2:2943
+574:2:2951
+575:2:2952
+576:2:2956
+577:2:2957
+578:2:2965
+579:2:2970
+580:2:2971
+581:2:2983
+582:2:2984
+583:2:2985
+584:2:2983
+585:2:2984
+586:2:2988
+587:2:2989
+588:2:2997
+589:2:3002
+590:2:3003
+591:2:3014
+592:2:3015
+593:2:3016
+594:2:3027
+595:2:3032
+596:2:3033
+597:2:3044
+598:2:3045
+599:2:3046
+600:2:3044
+601:2:3045
+602:2:3046
+603:2:3057
+604:2:3065
+605:0:4649
+606:2:2936
+607:0:4649
+608:2:3124
+609:0:4649
+610:2:3828
+611:2:3829
+612:2:3833
+613:2:3837
+614:2:3838
+615:2:3842
+616:2:3850
+617:2:3851
+618:2:3855
+619:2:3856
+620:2:3855
+621:2:3859
+622:2:3860
+623:2:3864
+624:2:3871
+625:2:3878
+626:2:3879
+627:2:3886
+628:2:3891
+629:2:3898
+630:2:3899
+631:2:3898
+632:2:3899
+633:2:3906
+634:2:3910
+635:0:4649
+636:2:3915
+637:0:4649
+638:2:3916
+639:0:4649
+640:2:3917
+641:0:4649
+642:2:3918
+643:0:4649
+644:1:53
+645:0:4649
+646:2:3919
+647:0:4649
+648:1:57
+649:1:58
+650:1:62
+651:1:66
+652:1:67
+653:1:71
+654:1:79
+655:1:80
+656:1:84
+657:1:88
+658:1:89
+659:1:84
+660:1:88
+661:1:89
+662:1:93
+663:1:100
+664:1:107
+665:1:108
+666:1:115
+667:1:120
+668:1:127
+669:1:128
+670:1:127
+671:1:128
+672:1:135
+673:1:139
+674:0:4649
+675:2:3918
+676:0:4649
+677:1:144
+678:0:4649
+679:2:3919
+680:0:4649
+681:2:3920
+682:0:4649
+683:2:3925
+684:0:4649
+685:2:3926
+686:0:4649
+687:2:3934
+688:2:3935
+689:2:3939
+690:2:3943
+691:2:3944
+692:2:3948
+693:2:3956
+694:2:3957
+695:2:3961
+696:2:3965
+697:2:3966
+698:2:3961
+699:2:3965
+700:2:3966
+701:2:3970
+702:2:3977
+703:2:3984
+704:2:3985
+705:2:3992
+706:2:3997
+707:2:4004
+708:2:4005
+709:2:4004
+710:2:4005
+711:2:4012
+712:2:4016
+713:0:4649
+714:2:3126
+715:2:3809
+716:0:4649
+717:2:2936
+718:0:4649
+719:2:3127
+720:0:4649
+721:2:2936
+722:0:4649
+723:2:3130
+724:2:3131
+725:2:3135
+726:2:3136
+727:2:3144
+728:2:3145
+729:2:3149
+730:2:3150
+731:2:3158
+732:2:3163
+733:2:3167
+734:2:3168
+735:2:3176
+736:2:3177
+737:2:3181
+738:2:3182
+739:2:3176
+740:2:3177
+741:2:3181
+742:2:3182
+743:2:3190
+744:2:3195
+745:2:3196
+746:2:3207
+747:2:3208
+748:2:3209
+749:2:3220
+750:2:3225
+751:2:3226
+752:2:3237
+753:2:3238
+754:2:3239
+755:2:3237
+756:2:3238
+757:2:3239
+758:2:3250
+759:2:3257
+760:0:4649
+761:2:2936
+762:0:4649
+763:2:3261
+764:2:3262
+765:2:3263
+766:2:3275
+767:2:3276
+768:2:3280
+769:2:3281
+770:2:3289
+771:2:3294
+772:2:3298
+773:2:3299
+774:2:3307
+775:2:3308
+776:2:3312
+777:2:3313
+778:2:3307
+779:2:3308
+780:2:3312
+781:2:3313
+782:2:3321
+783:2:3326
+784:2:3327
+785:2:3338
+786:2:3339
+787:2:3340
+788:2:3351
+789:2:3356
+790:2:3357
+791:2:3368
+792:2:3369
+793:2:3370
+794:2:3368
+795:2:3369
+796:2:3370
+797:2:3381
+798:2:3392
+799:2:3393
+800:0:4649
+801:2:2936
+802:0:4649
+803:2:3400
+804:2:3401
+805:2:3405
+806:2:3406
+807:2:3414
+808:2:3415
+809:2:3419
+810:2:3420
+811:2:3428
+812:2:3433
+813:2:3437
+814:2:3438
+815:2:3446
+816:2:3447
+817:2:3451
+818:2:3452
+819:2:3446
+820:2:3447
+821:2:3451
+822:2:3452
+823:2:3460
+824:2:3465
+825:2:3466
+826:2:3477
+827:2:3478
+828:2:3479
+829:2:3490
+830:2:3495
+831:2:3496
+832:2:3507
+833:2:3508
+834:2:3509
+835:2:3507
+836:2:3508
+837:2:3509
+838:2:3520
+839:0:4649
+840:2:2936
+841:0:4649
+842:2:3529
+843:2:3530
+844:2:3534
+845:2:3535
+846:2:3543
+847:2:3544
+848:2:3548
+849:2:3549
+850:2:3557
+851:2:3562
+852:2:3566
+853:2:3567
+854:2:3575
+855:2:3576
+856:2:3580
+857:2:3581
+858:2:3575
+859:2:3576
+860:2:3580
+861:2:3581
+862:2:3589
+863:2:3594
+864:2:3595
+865:2:3606
+866:2:3607
+867:2:3608
+868:2:3619
+869:2:3624
+870:2:3625
+871:2:3636
+872:2:3637
+873:2:3638
+874:2:3636
+875:2:3637
+876:2:3638
+877:2:3649
+878:2:3656
+879:0:4649
+880:2:2936
+881:0:4649
+882:2:3660
+883:2:3661
+884:2:3662
+885:2:3674
+886:2:3675
+887:2:3679
+888:2:3680
+889:2:3688
+890:2:3693
+891:2:3697
+892:2:3698
+893:2:3706
+894:2:3707
+895:2:3711
+896:2:3712
+897:2:3706
+898:2:3707
+899:2:3711
+900:2:3712
+901:2:3720
+902:2:3725
+903:2:3726
+904:2:3737
+905:2:3738
+906:2:3739
+907:2:3750
+908:2:3755
+909:2:3756
+910:2:3767
+911:2:3768
+912:2:3769
+913:2:3767
+914:2:3768
+915:2:3769
+916:2:3780
+917:2:3790
+918:2:3791
+919:0:4649
+920:2:2936
+921:0:4649
+922:2:3797
+923:0:4649
+924:2:4422
+925:2:4423
+926:2:4427
+927:2:4431
+928:2:4432
+929:2:4436
+930:2:4444
+931:2:4445
+932:2:4449
+933:2:4453
+934:2:4454
+935:2:4449
+936:2:4453
+937:2:4454
+938:2:4458
+939:2:4465
+940:2:4472
+941:2:4473
+942:2:4480
+943:2:4485
+944:2:4492
+945:2:4493
+946:2:4492
+947:2:4493
+948:2:4500
+949:2:4504
+950:0:4649
+951:2:4509
+952:0:4649
+953:2:4510
+954:0:4649
+955:2:4511
+956:0:4649
+957:2:4512
+958:0:4649
+959:1:53
+960:0:4649
+961:2:4513
+962:0:4649
+963:1:57
+964:1:58
+965:1:62
+966:1:66
+967:1:67
+968:1:71
+969:1:79
+970:1:80
+971:1:84
+972:1:88
+973:1:89
+974:1:84
+975:1:88
+976:1:89
+977:1:93
+978:1:100
+979:1:107
+980:1:108
+981:1:115
+982:1:120
+983:1:127
+984:1:128
+985:1:127
+986:1:128
+987:1:135
+988:1:139
+989:0:4649
+990:2:4512
+991:0:4649
+992:1:144
+993:0:4649
+994:2:4513
+995:0:4649
+996:2:4514
+997:0:4649
+998:2:4519
+999:0:4649
+1000:2:4520
+1001:0:4649
+1002:2:4528
+1003:2:4529
+1004:2:4533
+1005:2:4537
+1006:2:4538
+1007:2:4542
+1008:2:4550
+1009:2:4551
+1010:2:4555
+1011:2:4559
+1012:2:4560
+1013:2:4555
+1014:2:4559
+1015:2:4560
+1016:2:4564
+1017:2:4571
+1018:2:4578
+1019:2:4579
+1020:2:4586
+1021:2:4591
+1022:2:4598
+1023:2:4599
+1024:2:4598
+1025:2:4599
+1026:2:4606
+1027:2:4610
+1028:0:4649
+1029:2:3799
+1030:2:3809
+1031:0:4649
+1032:2:2936
+1033:0:4649
+1034:2:3800
+1035:2:3801
+1036:0:4649
+1037:2:2936
+1038:0:4649
+1039:2:3805
+1040:0:4649
+1041:2:3813
+1042:0:4649
+1043:2:2932
+1044:0:4649
+1045:2:2934
+1046:0:4649
+1047:2:2935
+1048:0:4649
+1049:2:2936
+1050:0:4649
+1051:2:2937
+1052:2:2938
+1053:2:2942
+1054:2:2943
+1055:2:2951
+1056:2:2952
+1057:2:2956
+1058:2:2957
+1059:2:2965
+1060:2:2970
+1061:2:2974
+1062:2:2975
+1063:2:2983
+1064:2:2984
+1065:2:2988
+1066:2:2989
+1067:2:2983
+1068:2:2984
+1069:2:2985
+1070:2:2997
+1071:2:3002
+1072:2:3003
+1073:2:3014
+1074:2:3015
+1075:2:3016
+1076:2:3027
+1077:2:3032
+1078:2:3033
+1079:2:3044
+1080:2:3045
+1081:2:3046
+1082:2:3044
+1083:2:3045
+1084:2:3046
+1085:2:3057
+1086:2:3065
+1087:0:4649
+1088:2:2936
+1089:0:4649
+1090:2:3117
+1091:2:3118
+1092:2:3119
+1093:0:4649
+1094:2:2936
+1095:0:4649
+1096:2:3124
+1097:0:4649
+1098:1:145
+1099:0:4649
+1100:1:147
+1101:0:4649
+1102:1:46
+1103:0:4649
+1104:1:153
+1105:1:154
+1106:1:158
+1107:1:159
+1108:1:167
+1109:1:168
+1110:1:172
+1111:1:173
+1112:1:181
+1113:1:186
+1114:1:190
+1115:1:191
+1116:1:199
+1117:1:200
+1118:1:204
+1119:1:205
+1120:1:199
+1121:1:200
+1122:1:204
+1123:1:205
+1124:1:213
+1125:1:218
+1126:1:219
+1127:1:230
+1128:1:231
+1129:1:232
+1130:1:243
+1131:1:248
+1132:1:249
+1133:1:260
+1134:1:261
+1135:1:262
+1136:1:260
+1137:1:261
+1138:1:262
+1139:1:273
+1140:0:4649
+1141:1:42
+1142:0:4649
+1143:1:43
+1144:0:4649
+1145:1:44
+1146:0:4649
+1147:1:145
+1148:0:4649
+1149:1:147
+1150:0:4649
+1151:1:46
+1152:0:4649
+1153:1:282
+1154:1:283
+1155:0:4649
+1156:1:42
+1157:0:4649
+1158:1:43
+1159:0:4649
+1160:1:44
+1161:0:4649
+1162:1:145
+1163:0:4649
+1164:1:147
+1165:0:4649
+1166:1:46
+1167:0:4649
+1168:1:289
+1169:1:290
+1170:1:294
+1171:1:295
+1172:1:303
+1173:1:304
+1174:1:308
+1175:1:309
+1176:1:317
+1177:1:322
+1178:1:326
+1179:1:327
+1180:1:335
+1181:1:336
+1182:1:340
+1183:1:341
+1184:1:335
+1185:1:336
+1186:1:340
+1187:1:341
+1188:1:349
+1189:1:354
+1190:1:355
+1191:1:366
+1192:1:367
+1193:1:368
+1194:1:379
+1195:1:384
+1196:1:385
+1197:1:396
+1198:1:397
+1199:1:398
+1200:1:396
+1201:1:397
+1202:1:398
+1203:1:409
+1204:0:4649
+1205:1:42
+1206:0:4649
+1207:1:43
+1208:0:4649
+1209:1:44
+1210:0:4649
+1211:1:145
+1212:0:4649
+1213:1:147
+1214:0:4649
+1215:1:46
+1216:0:4649
+1217:1:418
+1218:1:419
+1219:1:423
+1220:1:424
+1221:1:432
+1222:1:433
+1223:1:437
+1224:1:438
+1225:1:446
+1226:1:451
+1227:1:455
+1228:1:456
+1229:1:464
+1230:1:465
+1231:1:469
+1232:1:470
+1233:1:464
+1234:1:465
+1235:1:469
+1236:1:470
+1237:1:478
+1238:1:483
+1239:1:484
+1240:1:495
+1241:1:496
+1242:1:497
+1243:1:508
+1244:1:513
+1245:1:514
+1246:1:525
+1247:1:526
+1248:1:527
+1249:1:525
+1250:1:526
+1251:1:527
+1252:1:538
+1253:1:545
+1254:0:4649
+1255:1:42
+1256:0:4649
+1257:1:43
+1258:0:4649
+1259:1:44
+1260:0:4649
+1261:1:145
+1262:0:4649
+1263:1:147
+1264:0:4649
+1265:1:46
+1266:0:4649
+1267:1:683
+1268:1:684
+1269:1:688
+1270:1:689
+1271:1:697
+1272:1:698
+1273:1:699
+1274:1:711
+1275:1:716
+1276:1:720
+1277:1:721
+1278:1:729
+1279:1:730
+1280:1:734
+1281:1:735
+1282:1:729
+1283:1:730
+1284:1:734
+1285:1:735
+1286:1:743
+1287:1:748
+1288:1:749
+1289:1:760
+1290:1:761
+1291:1:762
+1292:1:773
+1293:1:778
+1294:1:779
+1295:1:790
+1296:1:791
+1297:1:792
+1298:1:790
+1299:1:791
+1300:1:792
+1301:1:803
+1302:0:4649
+1303:1:42
+1304:0:4649
+1305:1:43
+1306:0:4649
+1307:1:44
+1308:0:4649
+1309:1:145
+1310:0:4649
+1311:1:147
+1312:0:4649
+1313:1:46
+1314:0:4649
+1315:1:812
+1316:1:815
+1317:1:816
+1318:0:4649
+1319:1:42
+1320:0:4649
+1321:1:43
+1322:0:4649
+1323:1:44
+1324:0:4649
+1325:1:145
+1326:0:4649
+1327:1:147
+1328:0:4649
+1329:1:46
+1330:0:4649
+1331:1:819
+1332:1:820
+1333:1:824
+1334:1:825
+1335:1:833
+1336:1:834
+1337:1:838
+1338:1:839
+1339:1:847
+1340:1:852
+1341:1:856
+1342:1:857
+1343:1:865
+1344:1:866
+1345:1:870
+1346:1:871
+1347:1:865
+1348:1:866
+1349:1:870
+1350:1:871
+1351:1:879
+1352:1:884
+1353:1:885
+1354:1:896
+1355:1:897
+1356:1:898
+1357:1:909
+1358:1:914
+1359:1:915
+1360:1:926
+1361:1:927
+1362:1:928
+1363:1:926
+1364:1:927
+1365:1:928
+1366:1:939
+1367:0:4649
+1368:1:42
+1369:0:4649
+1370:1:43
+1371:0:4649
+1372:1:44
+1373:0:4649
+1374:1:145
+1375:0:4649
+1376:1:147
+1377:0:4649
+1378:1:46
+1379:0:4649
+1380:1:1079
+1381:1:1080
+1382:1:1084
+1383:1:1085
+1384:1:1093
+1385:1:1094
+1386:1:1098
+1387:1:1099
+1388:1:1107
+1389:1:1112
+1390:1:1116
+1391:1:1117
+1392:1:1125
+1393:1:1126
+1394:1:1130
+1395:1:1131
+1396:1:1125
+1397:1:1126
+1398:1:1130
+1399:1:1131
+1400:1:1139
+1401:1:1144
+1402:1:1145
+1403:1:1156
+1404:1:1157
+1405:1:1158
+1406:1:1169
+1407:1:1174
+1408:1:1175
+1409:1:1186
+1410:1:1187
+1411:1:1188
+1412:1:1186
+1413:1:1187
+1414:1:1188
+1415:1:1199
+1416:1:1206
+1417:1:1210
+1418:0:4649
+1419:1:42
+1420:0:4649
+1421:1:43
+1422:0:4649
+1423:1:44
+1424:0:4649
+1425:1:145
+1426:0:4649
+1427:1:147
+1428:0:4649
+1429:1:46
+1430:0:4649
+1431:1:1211
+1432:1:1212
+1433:1:1216
+1434:1:1217
+1435:1:1225
+1436:1:1226
+1437:1:1227
+1438:1:1239
+1439:1:1244
+1440:1:1248
+1441:1:1249
+1442:1:1257
+1443:1:1258
+1444:1:1262
+1445:1:1263
+1446:1:1257
+1447:1:1258
+1448:1:1262
+1449:1:1263
+1450:1:1271
+1451:1:1276
+1452:1:1277
+1453:1:1288
+1454:1:1289
+1455:1:1290
+1456:1:1301
+1457:1:1306
+1458:1:1307
+1459:1:1318
+1460:1:1319
+1461:1:1320
+1462:1:1318
+1463:1:1319
+1464:1:1320
+1465:1:1331
+1466:0:4649
+1467:1:42
+1468:0:4649
+1469:1:43
+1470:0:4649
+1471:1:44
+1472:0:4649
+1473:1:145
+1474:0:4649
+1475:1:147
+1476:0:4649
+1477:1:46
+1478:0:4649
+1479:1:1340
+1480:0:4649
+1481:1:2804
+1482:1:2811
+1483:1:2812
+1484:1:2819
+1485:1:2824
+1486:1:2831
+1487:1:2832
+1488:1:2831
+1489:1:2832
+1490:1:2839
+1491:1:2843
+1492:0:4649
+1493:2:3828
+1494:2:3829
+1495:2:3833
+1496:2:3837
+1497:2:3838
+1498:2:3842
+1499:2:3847
+1500:2:3855
+1501:2:3859
+1502:2:3860
+1503:2:3855
+1504:2:3856
+1505:2:3864
+1506:2:3871
+1507:2:3878
+1508:2:3879
+1509:2:3886
+1510:2:3891
+1511:2:3898
+1512:2:3899
+1513:2:3898
+1514:2:3899
+1515:2:3906
+1516:2:3910
+1517:0:4649
+1518:2:3915
+1519:0:4649
+1520:2:3916
+1521:0:4649
+1522:2:3917
+1523:0:4649
+1524:2:3918
+1525:0:4649
+1526:1:1342
+1527:1:1343
+1528:0:4647
+1529:2:3919
+1530:0:4653
+1531:1:2501
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.log b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.log
new file mode 100644 (file)
index 0000000..1d98e20
--- /dev/null
@@ -0,0 +1,696 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_single_flip.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+Depth=    9651 States=    1e+06 Transitions= 1.79e+08 Memory=   513.029        t=    267 R=   4e+03
+Depth=    9651 States=    2e+06 Transitions= 4.37e+08 Memory=   559.416        t=    679 R=   3e+03
+Depth=    9651 States=    3e+06 Transitions= 7.08e+08 Memory=   605.901        t= 1.13e+03 R=   3e+03
+pan: resizing hashtable to -w22..  done
+Depth=    9651 States=    4e+06 Transitions=  9.4e+08 Memory=   683.213        t= 1.48e+03 R=   3e+03
+Depth=    9651 States=    5e+06 Transitions= 1.26e+09 Memory=   730.479        t= 1.98e+03 R=   3e+03
+Depth=    9651 States=    6e+06 Transitions= 1.55e+09 Memory=   777.451        t= 2.43e+03 R=   2e+03
+Depth=    9651 States=    7e+06 Transitions= 1.82e+09 Memory=   824.522        t= 2.87e+03 R=   2e+03
+Depth=    9651 States=    8e+06 Transitions= 2.01e+09 Memory=   871.494        t= 3.16e+03 R=   3e+03
+Depth=    9651 States=    9e+06 Transitions= 2.19e+09 Memory=   917.295        t= 3.45e+03 R=   3e+03
+pan: resizing hashtable to -w24..  done
+Depth=    9651 States=    1e+07 Transitions= 2.37e+09 Memory=  1087.092        t= 3.73e+03 R=   3e+03
+Depth=    9651 States=  1.1e+07 Transitions= 2.59e+09 Memory=  1133.088        t= 4.06e+03 R=   3e+03
+Depth=    9651 States=  1.2e+07 Transitions= 2.88e+09 Memory=  1179.572        t= 4.5e+03 R=   3e+03
+Depth=    9651 States=  1.3e+07 Transitions=  3.1e+09 Memory=  1226.545        t= 4.84e+03 R=   3e+03
+Depth=    9651 States=  1.4e+07 Transitions= 3.33e+09 Memory=  1272.834        t= 5.18e+03 R=   3e+03
+Depth=    9651 States=  1.5e+07 Transitions= 3.58e+09 Memory=  1318.733        t= 5.57e+03 R=   3e+03
+Depth=    9651 States=  1.6e+07 Transitions= 3.79e+09 Memory=  1364.729        t= 5.9e+03 R=   3e+03
+Depth=    9651 States=  1.7e+07 Transitions= 4.09e+09 Memory=  1410.725        t= 6.36e+03 R=   3e+03
+Depth=    9892 States=  1.8e+07 Transitions= 4.34e+09 Memory=  1456.526        t= 6.76e+03 R=   3e+03
+Depth=    9897 States=  1.9e+07 Transitions= 4.61e+09 Memory=  1502.326        t= 7.17e+03 R=   3e+03
+Depth=    9897 States=    2e+07 Transitions= 4.88e+09 Memory=  1548.127        t= 7.6e+03 R=   3e+03
+Depth=    9897 States=  2.1e+07 Transitions= 5.16e+09 Memory=  1594.318        t= 8.03e+03 R=   3e+03
+Depth=    9897 States=  2.2e+07 Transitions= 5.46e+09 Memory=  1640.315        t= 8.52e+03 R=   3e+03
+Depth=    9897 States=  2.3e+07 Transitions= 5.74e+09 Memory=  1686.115        t= 8.95e+03 R=   3e+03
+Depth=    9897 States=  2.4e+07 Transitions=    6e+09 Memory=  1731.916        t= 9.36e+03 R=   3e+03
+Depth=    9897 States=  2.5e+07 Transitions= 6.24e+09 Memory=  1777.717        t= 9.75e+03 R=   3e+03
+Depth=    9897 States=  2.6e+07 Transitions= 6.48e+09 Memory=  1823.518        t= 1.01e+04 R=   3e+03
+Depth=    9897 States=  2.7e+07 Transitions=  6.7e+09 Memory=  1869.318        t= 1.05e+04 R=   3e+03
+Depth=    9897 States=  2.8e+07 Transitions= 7.03e+09 Memory=  1916.486        t= 1.1e+04 R=   3e+03
+Depth=    9897 States=  2.9e+07 Transitions= 7.36e+09 Memory=  1962.678        t= 1.15e+04 R=   3e+03
+Depth=    9897 States=    3e+07 Transitions= 7.63e+09 Memory=  2008.967        t= 1.2e+04 R=   3e+03
+Depth=    9897 States=  3.1e+07 Transitions= 7.94e+09 Memory=  2054.963        t= 1.25e+04 R=   2e+03
+Depth=    9897 States=  3.2e+07 Transitions=  8.2e+09 Memory=  2102.033        t= 1.29e+04 R=   2e+03
+Depth=    9897 States=  3.3e+07 Transitions= 8.41e+09 Memory=  2148.029        t= 1.32e+04 R=   2e+03
+Depth=    9897 States=  3.4e+07 Transitions= 8.68e+09 Memory=  2194.123        t= 1.36e+04 R=   2e+03
+pan: resizing hashtable to -w26..  done
+Depth=    9897 States=  3.5e+07 Transitions= 8.91e+09 Memory=  2736.006        t= 1.4e+04 R=   2e+03
+Depth=    9897 States=  3.6e+07 Transitions=  9.2e+09 Memory=  2781.807        t= 1.44e+04 R=   2e+03
+Depth=    9897 States=  3.7e+07 Transitions= 9.48e+09 Memory=  2827.608        t= 1.49e+04 R=   2e+03
+Depth=    9897 States=  3.8e+07 Transitions= 9.72e+09 Memory=  2873.408        t= 1.52e+04 R=   2e+03
+Depth=    9897 States=  3.9e+07 Transitions= 9.98e+09 Memory=  2919.209        t= 1.56e+04 R=   2e+03
+Depth=    9897 States=    4e+07 Transitions= 1.02e+10 Memory=  2965.010        t= 1.6e+04 R=   2e+03
+Depth=    9897 States=  4.1e+07 Transitions= 1.05e+10 Memory=  3010.713        t= 1.64e+04 R=   2e+03
+Depth=    9897 States=  4.2e+07 Transitions= 1.07e+10 Memory=  3056.611        t= 1.68e+04 R=   3e+03
+pan: claim violated! (at depth 1439)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 9897, errors: 1
+ 42642410 states, stored
+1.0909582e+10 states, matched
+1.0952224e+10 transitions (= stored+matched)
+5.922913e+10 atomic steps
+hash conflicts: 6.3170511e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 4717.369      equivalent memory usage for states (stored*(State-vector + overhead))
+ 2117.231      actual memory usage for states (compression: 44.88%)
+               state-vector as stored = 16 byte + 36 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+ 3086.494      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 304368 3993 2946 2 2 ]
+unreached in proctype urcu_reader
+       line 894, "pan.___", state 12, "((i<1))"
+       line 894, "pan.___", state 12, "((i>=1))"
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 407, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 223, "(1)"
+       line 434, "pan.___", state 253, "(1)"
+       line 438, "pan.___", state 266, "(1)"
+       line 687, "pan.___", state 287, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 294, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 326, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 340, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 359, "(1)"
+       line 434, "pan.___", state 389, "(1)"
+       line 438, "pan.___", state 402, "(1)"
+       line 407, "pan.___", state 423, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 455, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 469, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 488, "(1)"
+       line 434, "pan.___", state 518, "(1)"
+       line 438, "pan.___", state 531, "(1)"
+       line 407, "pan.___", state 554, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 556, "(1)"
+       line 407, "pan.___", state 557, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 557, "else"
+       line 407, "pan.___", state 560, "(1)"
+       line 411, "pan.___", state 568, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 570, "(1)"
+       line 411, "pan.___", state 571, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 571, "else"
+       line 411, "pan.___", state 574, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 411, "pan.___", state 575, "(1)"
+       line 409, "pan.___", state 580, "((i<1))"
+       line 409, "pan.___", state 580, "((i>=1))"
+       line 416, "pan.___", state 586, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 588, "(1)"
+       line 416, "pan.___", state 589, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 589, "else"
+       line 416, "pan.___", state 592, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 416, "pan.___", state 593, "(1)"
+       line 420, "pan.___", state 600, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 602, "(1)"
+       line 420, "pan.___", state 603, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 603, "else"
+       line 420, "pan.___", state 606, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 420, "pan.___", state 607, "(1)"
+       line 418, "pan.___", state 612, "((i<2))"
+       line 418, "pan.___", state 612, "((i>=2))"
+       line 425, "pan.___", state 619, "(1)"
+       line 425, "pan.___", state 620, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 620, "else"
+       line 425, "pan.___", state 623, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 425, "pan.___", state 624, "(1)"
+       line 429, "pan.___", state 632, "(1)"
+       line 429, "pan.___", state 633, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 633, "else"
+       line 429, "pan.___", state 636, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 429, "pan.___", state 637, "(1)"
+       line 427, "pan.___", state 642, "((i<1))"
+       line 427, "pan.___", state 642, "((i>=1))"
+       line 434, "pan.___", state 649, "(1)"
+       line 434, "pan.___", state 650, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 650, "else"
+       line 434, "pan.___", state 653, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 434, "pan.___", state 654, "(1)"
+       line 438, "pan.___", state 662, "(1)"
+       line 438, "pan.___", state 663, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 663, "else"
+       line 438, "pan.___", state 666, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 438, "pan.___", state 667, "(1)"
+       line 436, "pan.___", state 672, "((i<2))"
+       line 436, "pan.___", state 672, "((i>=2))"
+       line 446, "pan.___", state 676, "(1)"
+       line 446, "pan.___", state 676, "(1)"
+       line 687, "pan.___", state 679, "cached_urcu_active_readers = (tmp+1)"
+       line 687, "pan.___", state 680, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 687, "pan.___", state 681, "(1)"
+       line 407, "pan.___", state 688, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 720, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 734, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 753, "(1)"
+       line 434, "pan.___", state 783, "(1)"
+       line 438, "pan.___", state 796, "(1)"
+       line 407, "pan.___", state 824, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 856, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 870, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 889, "(1)"
+       line 434, "pan.___", state 919, "(1)"
+       line 438, "pan.___", state 932, "(1)"
+       line 407, "pan.___", state 953, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 955, "(1)"
+       line 407, "pan.___", state 956, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 956, "else"
+       line 407, "pan.___", state 959, "(1)"
+       line 411, "pan.___", state 967, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 969, "(1)"
+       line 411, "pan.___", state 970, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 970, "else"
+       line 411, "pan.___", state 973, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 411, "pan.___", state 974, "(1)"
+       line 409, "pan.___", state 979, "((i<1))"
+       line 409, "pan.___", state 979, "((i>=1))"
+       line 416, "pan.___", state 985, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 987, "(1)"
+       line 416, "pan.___", state 988, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 988, "else"
+       line 416, "pan.___", state 991, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 416, "pan.___", state 992, "(1)"
+       line 420, "pan.___", state 999, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 1001, "(1)"
+       line 420, "pan.___", state 1002, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 1002, "else"
+       line 420, "pan.___", state 1005, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 420, "pan.___", state 1006, "(1)"
+       line 418, "pan.___", state 1011, "((i<2))"
+       line 418, "pan.___", state 1011, "((i>=2))"
+       line 425, "pan.___", state 1018, "(1)"
+       line 425, "pan.___", state 1019, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 1019, "else"
+       line 425, "pan.___", state 1022, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 425, "pan.___", state 1023, "(1)"
+       line 429, "pan.___", state 1031, "(1)"
+       line 429, "pan.___", state 1032, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 1032, "else"
+       line 429, "pan.___", state 1035, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 429, "pan.___", state 1036, "(1)"
+       line 427, "pan.___", state 1041, "((i<1))"
+       line 427, "pan.___", state 1041, "((i>=1))"
+       line 434, "pan.___", state 1048, "(1)"
+       line 434, "pan.___", state 1049, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 1049, "else"
+       line 434, "pan.___", state 1052, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 434, "pan.___", state 1053, "(1)"
+       line 438, "pan.___", state 1061, "(1)"
+       line 438, "pan.___", state 1062, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 1062, "else"
+       line 438, "pan.___", state 1065, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 438, "pan.___", state 1066, "(1)"
+       line 436, "pan.___", state 1071, "((i<2))"
+       line 436, "pan.___", state 1071, "((i>=2))"
+       line 446, "pan.___", state 1075, "(1)"
+       line 446, "pan.___", state 1075, "(1)"
+       line 695, "pan.___", state 1079, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 1084, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1116, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1130, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1149, "(1)"
+       line 434, "pan.___", state 1179, "(1)"
+       line 438, "pan.___", state 1192, "(1)"
+       line 407, "pan.___", state 1216, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1248, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1262, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1281, "(1)"
+       line 434, "pan.___", state 1311, "(1)"
+       line 438, "pan.___", state 1324, "(1)"
+       line 407, "pan.___", state 1349, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1381, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1395, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1414, "(1)"
+       line 434, "pan.___", state 1444, "(1)"
+       line 438, "pan.___", state 1457, "(1)"
+       line 407, "pan.___", state 1478, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1510, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1524, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1543, "(1)"
+       line 434, "pan.___", state 1573, "(1)"
+       line 438, "pan.___", state 1586, "(1)"
+       line 407, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1644, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1658, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1677, "(1)"
+       line 434, "pan.___", state 1707, "(1)"
+       line 438, "pan.___", state 1720, "(1)"
+       line 407, "pan.___", state 1741, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1773, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1787, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1806, "(1)"
+       line 434, "pan.___", state 1836, "(1)"
+       line 438, "pan.___", state 1849, "(1)"
+       line 407, "pan.___", state 1873, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1905, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1919, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 1938, "(1)"
+       line 434, "pan.___", state 1968, "(1)"
+       line 438, "pan.___", state 1981, "(1)"
+       line 734, "pan.___", state 2002, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 2009, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2041, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2055, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2074, "(1)"
+       line 434, "pan.___", state 2104, "(1)"
+       line 438, "pan.___", state 2117, "(1)"
+       line 407, "pan.___", state 2138, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2170, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2184, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2203, "(1)"
+       line 434, "pan.___", state 2233, "(1)"
+       line 438, "pan.___", state 2246, "(1)"
+       line 407, "pan.___", state 2269, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 2271, "(1)"
+       line 407, "pan.___", state 2272, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 2272, "else"
+       line 407, "pan.___", state 2275, "(1)"
+       line 411, "pan.___", state 2283, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2286, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 2286, "else"
+       line 411, "pan.___", state 2289, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 411, "pan.___", state 2290, "(1)"
+       line 409, "pan.___", state 2295, "((i<1))"
+       line 409, "pan.___", state 2295, "((i>=1))"
+       line 416, "pan.___", state 2301, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2303, "(1)"
+       line 416, "pan.___", state 2304, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2304, "else"
+       line 416, "pan.___", state 2307, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 416, "pan.___", state 2308, "(1)"
+       line 420, "pan.___", state 2315, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2317, "(1)"
+       line 420, "pan.___", state 2318, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2318, "else"
+       line 420, "pan.___", state 2321, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 420, "pan.___", state 2322, "(1)"
+       line 418, "pan.___", state 2327, "((i<2))"
+       line 418, "pan.___", state 2327, "((i>=2))"
+       line 425, "pan.___", state 2334, "(1)"
+       line 425, "pan.___", state 2335, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 2335, "else"
+       line 425, "pan.___", state 2338, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 425, "pan.___", state 2339, "(1)"
+       line 429, "pan.___", state 2347, "(1)"
+       line 429, "pan.___", state 2348, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 2348, "else"
+       line 429, "pan.___", state 2351, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 429, "pan.___", state 2352, "(1)"
+       line 427, "pan.___", state 2357, "((i<1))"
+       line 427, "pan.___", state 2357, "((i>=1))"
+       line 434, "pan.___", state 2364, "(1)"
+       line 434, "pan.___", state 2365, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 2365, "else"
+       line 434, "pan.___", state 2368, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 434, "pan.___", state 2369, "(1)"
+       line 438, "pan.___", state 2377, "(1)"
+       line 438, "pan.___", state 2378, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 2378, "else"
+       line 438, "pan.___", state 2381, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 438, "pan.___", state 2382, "(1)"
+       line 436, "pan.___", state 2387, "((i<2))"
+       line 436, "pan.___", state 2387, "((i>=2))"
+       line 446, "pan.___", state 2391, "(1)"
+       line 446, "pan.___", state 2391, "(1)"
+       line 734, "pan.___", state 2394, "cached_urcu_active_readers = (tmp+1)"
+       line 734, "pan.___", state 2395, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 734, "pan.___", state 2396, "(1)"
+       line 407, "pan.___", state 2403, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2435, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2449, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2468, "(1)"
+       line 434, "pan.___", state 2498, "(1)"
+       line 438, "pan.___", state 2511, "(1)"
+       line 407, "pan.___", state 2538, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2570, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2584, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2603, "(1)"
+       line 434, "pan.___", state 2633, "(1)"
+       line 438, "pan.___", state 2646, "(1)"
+       line 407, "pan.___", state 2667, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2699, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2713, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 2732, "(1)"
+       line 434, "pan.___", state 2762, "(1)"
+       line 438, "pan.___", state 2775, "(1)"
+       line 245, "pan.___", state 2808, "(1)"
+       line 253, "pan.___", state 2828, "(1)"
+       line 257, "pan.___", state 2836, "(1)"
+       line 245, "pan.___", state 2851, "(1)"
+       line 253, "pan.___", state 2871, "(1)"
+       line 257, "pan.___", state 2879, "(1)"
+       line 929, "pan.___", state 2896, "-end-"
+       (246 of 2896 states)
+unreached in proctype urcu_writer
+       line 1018, "pan.___", state 12, "((i<1))"
+       line 1018, "pan.___", state 12, "((i>=1))"
+       line 407, "pan.___", state 49, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 63, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 81, "cache_dirty_rcu_ptr = 0"
+       line 425, "pan.___", state 114, "(1)"
+       line 429, "pan.___", state 127, "(1)"
+       line 434, "pan.___", state 144, "(1)"
+       line 268, "pan.___", state 180, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 189, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 202, "cache_dirty_rcu_ptr = 0"
+       line 407, "pan.___", state 242, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 256, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 274, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 288, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 307, "(1)"
+       line 429, "pan.___", state 320, "(1)"
+       line 434, "pan.___", state 337, "(1)"
+       line 438, "pan.___", state 350, "(1)"
+       line 411, "pan.___", state 387, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 405, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 419, "cache_dirty_rcu_data[i] = 0"
+       line 429, "pan.___", state 451, "(1)"
+       line 434, "pan.___", state 468, "(1)"
+       line 438, "pan.___", state 481, "(1)"
+       line 407, "pan.___", state 511, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 525, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 543, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 557, "cache_dirty_rcu_data[i] = 0"
+       line 425, "pan.___", state 576, "(1)"
+       line 429, "pan.___", state 589, "(1)"
+       line 434, "pan.___", state 606, "(1)"
+       line 438, "pan.___", state 619, "(1)"
+       line 407, "pan.___", state 640, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 642, "(1)"
+       line 407, "pan.___", state 643, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 643, "else"
+       line 407, "pan.___", state 646, "(1)"
+       line 411, "pan.___", state 654, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 656, "(1)"
+       line 411, "pan.___", state 657, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 657, "else"
+       line 411, "pan.___", state 660, "(1)"
+       line 411, "pan.___", state 661, "(1)"
+       line 411, "pan.___", state 661, "(1)"
+       line 409, "pan.___", state 666, "((i<1))"
+       line 409, "pan.___", state 666, "((i>=1))"
+       line 416, "pan.___", state 672, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 674, "(1)"
+       line 416, "pan.___", state 675, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 675, "else"
+       line 416, "pan.___", state 678, "(1)"
+       line 416, "pan.___", state 679, "(1)"
+       line 416, "pan.___", state 679, "(1)"
+       line 420, "pan.___", state 686, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 688, "(1)"
+       line 420, "pan.___", state 689, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 689, "else"
+       line 420, "pan.___", state 692, "(1)"
+       line 420, "pan.___", state 693, "(1)"
+       line 420, "pan.___", state 693, "(1)"
+       line 418, "pan.___", state 698, "((i<2))"
+       line 418, "pan.___", state 698, "((i>=2))"
+       line 425, "pan.___", state 705, "(1)"
+       line 425, "pan.___", state 706, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 706, "else"
+       line 425, "pan.___", state 709, "(1)"
+       line 425, "pan.___", state 710, "(1)"
+       line 425, "pan.___", state 710, "(1)"
+       line 429, "pan.___", state 718, "(1)"
+       line 429, "pan.___", state 719, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 719, "else"
+       line 429, "pan.___", state 722, "(1)"
+       line 429, "pan.___", state 723, "(1)"
+       line 429, "pan.___", state 723, "(1)"
+       line 427, "pan.___", state 728, "((i<1))"
+       line 427, "pan.___", state 728, "((i>=1))"
+       line 434, "pan.___", state 735, "(1)"
+       line 434, "pan.___", state 736, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 736, "else"
+       line 434, "pan.___", state 739, "(1)"
+       line 434, "pan.___", state 740, "(1)"
+       line 434, "pan.___", state 740, "(1)"
+       line 438, "pan.___", state 748, "(1)"
+       line 438, "pan.___", state 749, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 749, "else"
+       line 438, "pan.___", state 752, "(1)"
+       line 438, "pan.___", state 753, "(1)"
+       line 438, "pan.___", state 753, "(1)"
+       line 436, "pan.___", state 758, "((i<2))"
+       line 436, "pan.___", state 758, "((i>=2))"
+       line 446, "pan.___", state 762, "(1)"
+       line 446, "pan.___", state 762, "(1)"
+       line 1184, "pan.___", state 766, "_proc_urcu_writer = (_proc_urcu_writer|(1<<10))"
+       line 407, "pan.___", state 771, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 773, "(1)"
+       line 407, "pan.___", state 774, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 774, "else"
+       line 407, "pan.___", state 777, "(1)"
+       line 411, "pan.___", state 785, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 787, "(1)"
+       line 411, "pan.___", state 788, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 788, "else"
+       line 411, "pan.___", state 791, "(1)"
+       line 411, "pan.___", state 792, "(1)"
+       line 411, "pan.___", state 792, "(1)"
+       line 409, "pan.___", state 797, "((i<1))"
+       line 409, "pan.___", state 797, "((i>=1))"
+       line 416, "pan.___", state 803, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 805, "(1)"
+       line 416, "pan.___", state 806, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 806, "else"
+       line 416, "pan.___", state 809, "(1)"
+       line 416, "pan.___", state 810, "(1)"
+       line 416, "pan.___", state 810, "(1)"
+       line 420, "pan.___", state 817, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 819, "(1)"
+       line 420, "pan.___", state 820, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 820, "else"
+       line 420, "pan.___", state 823, "(1)"
+       line 420, "pan.___", state 824, "(1)"
+       line 420, "pan.___", state 824, "(1)"
+       line 418, "pan.___", state 829, "((i<2))"
+       line 418, "pan.___", state 829, "((i>=2))"
+       line 425, "pan.___", state 836, "(1)"
+       line 425, "pan.___", state 837, "(!(cache_dirty_urcu_gp_ctr))"
+       line 425, "pan.___", state 837, "else"
+       line 425, "pan.___", state 840, "(1)"
+       line 425, "pan.___", state 841, "(1)"
+       line 425, "pan.___", state 841, "(1)"
+       line 429, "pan.___", state 849, "(1)"
+       line 429, "pan.___", state 850, "(!(cache_dirty_urcu_active_readers))"
+       line 429, "pan.___", state 850, "else"
+       line 429, "pan.___", state 853, "(1)"
+       line 429, "pan.___", state 854, "(1)"
+       line 429, "pan.___", state 854, "(1)"
+       line 427, "pan.___", state 859, "((i<1))"
+       line 427, "pan.___", state 859, "((i>=1))"
+       line 434, "pan.___", state 866, "(1)"
+       line 434, "pan.___", state 867, "(!(cache_dirty_rcu_ptr))"
+       line 434, "pan.___", state 867, "else"
+       line 434, "pan.___", state 870, "(1)"
+       line 434, "pan.___", state 871, "(1)"
+       line 434, "pan.___", state 871, "(1)"
+       line 438, "pan.___", state 879, "(1)"
+       line 438, "pan.___", state 880, "(!(cache_dirty_rcu_data[i]))"
+       line 438, "pan.___", state 880, "else"
+       line 438, "pan.___", state 883, "(1)"
+       line 438, "pan.___", state 884, "(1)"
+       line 438, "pan.___", state 884, "(1)"
+       line 436, "pan.___", state 889, "((i<2))"
+       line 436, "pan.___", state 889, "((i>=2))"
+       line 446, "pan.___", state 893, "(1)"
+       line 446, "pan.___", state 893, "(1)"
+       line 1200, "pan.___", state 898, "_proc_urcu_writer = (_proc_urcu_writer|(1<<11))"
+       line 1195, "pan.___", state 899, "(((tmp2&((1<<7)-1))&&((tmp2^0)&(1<<7))))"
+       line 1195, "pan.___", state 899, "else"
+       line 1220, "pan.___", state 903, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<12)|(1<<11))))"
+       line 268, "pan.___", state 934, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 943, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 958, "(1)"
+       line 280, "pan.___", state 965, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 981, "(1)"
+       line 249, "pan.___", state 989, "(1)"
+       line 253, "pan.___", state 1001, "(1)"
+       line 257, "pan.___", state 1009, "(1)"
+       line 268, "pan.___", state 1040, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1049, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1062, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1071, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1087, "(1)"
+       line 249, "pan.___", state 1095, "(1)"
+       line 253, "pan.___", state 1107, "(1)"
+       line 257, "pan.___", state 1115, "(1)"
+       line 272, "pan.___", state 1141, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1154, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1163, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1179, "(1)"
+       line 249, "pan.___", state 1187, "(1)"
+       line 253, "pan.___", state 1199, "(1)"
+       line 257, "pan.___", state 1207, "(1)"
+       line 268, "pan.___", state 1238, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1247, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1260, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1269, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1285, "(1)"
+       line 249, "pan.___", state 1293, "(1)"
+       line 253, "pan.___", state 1305, "(1)"
+       line 257, "pan.___", state 1313, "(1)"
+       line 268, "pan.___", state 1330, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1332, "(1)"
+       line 272, "pan.___", state 1339, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1341, "(1)"
+       line 272, "pan.___", state 1342, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1342, "else"
+       line 270, "pan.___", state 1347, "((i<1))"
+       line 270, "pan.___", state 1347, "((i>=1))"
+       line 276, "pan.___", state 1352, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1354, "(1)"
+       line 276, "pan.___", state 1355, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1355, "else"
+       line 280, "pan.___", state 1361, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1363, "(1)"
+       line 280, "pan.___", state 1364, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1364, "else"
+       line 278, "pan.___", state 1369, "((i<2))"
+       line 278, "pan.___", state 1369, "((i>=2))"
+       line 245, "pan.___", state 1377, "(1)"
+       line 249, "pan.___", state 1385, "(1)"
+       line 249, "pan.___", state 1386, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1386, "else"
+       line 247, "pan.___", state 1391, "((i<1))"
+       line 247, "pan.___", state 1391, "((i>=1))"
+       line 253, "pan.___", state 1397, "(1)"
+       line 253, "pan.___", state 1398, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1398, "else"
+       line 257, "pan.___", state 1405, "(1)"
+       line 257, "pan.___", state 1406, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1406, "else"
+       line 262, "pan.___", state 1415, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1415, "else"
+       line 1296, "pan.___", state 1418, "i = 0"
+       line 1296, "pan.___", state 1420, "reader_barrier = 1"
+       line 1296, "pan.___", state 1431, "((i<1))"
+       line 1296, "pan.___", state 1431, "((i>=1))"
+       line 268, "pan.___", state 1436, "cache_dirty_urcu_gp_ctr = 0"
+       line 268, "pan.___", state 1438, "(1)"
+       line 272, "pan.___", state 1445, "cache_dirty_urcu_active_readers = 0"
+       line 272, "pan.___", state 1447, "(1)"
+       line 272, "pan.___", state 1448, "(cache_dirty_urcu_active_readers)"
+       line 272, "pan.___", state 1448, "else"
+       line 270, "pan.___", state 1453, "((i<1))"
+       line 270, "pan.___", state 1453, "((i>=1))"
+       line 276, "pan.___", state 1458, "cache_dirty_rcu_ptr = 0"
+       line 276, "pan.___", state 1460, "(1)"
+       line 276, "pan.___", state 1461, "(cache_dirty_rcu_ptr)"
+       line 276, "pan.___", state 1461, "else"
+       line 280, "pan.___", state 1467, "cache_dirty_rcu_data[i] = 0"
+       line 280, "pan.___", state 1469, "(1)"
+       line 280, "pan.___", state 1470, "(cache_dirty_rcu_data[i])"
+       line 280, "pan.___", state 1470, "else"
+       line 278, "pan.___", state 1475, "((i<2))"
+       line 278, "pan.___", state 1475, "((i>=2))"
+       line 245, "pan.___", state 1483, "(1)"
+       line 249, "pan.___", state 1491, "(1)"
+       line 249, "pan.___", state 1492, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 1492, "else"
+       line 247, "pan.___", state 1497, "((i<1))"
+       line 247, "pan.___", state 1497, "((i>=1))"
+       line 253, "pan.___", state 1503, "(1)"
+       line 253, "pan.___", state 1504, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 1504, "else"
+       line 257, "pan.___", state 1511, "(1)"
+       line 257, "pan.___", state 1512, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 1512, "else"
+       line 262, "pan.___", state 1521, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 1521, "else"
+       line 295, "pan.___", state 1523, "(cache_dirty_urcu_gp_ctr)"
+       line 295, "pan.___", state 1523, "else"
+       line 1296, "pan.___", state 1524, "(cache_dirty_urcu_gp_ctr)"
+       line 1296, "pan.___", state 1524, "else"
+       line 272, "pan.___", state 1537, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1550, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1559, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1575, "(1)"
+       line 249, "pan.___", state 1583, "(1)"
+       line 253, "pan.___", state 1595, "(1)"
+       line 257, "pan.___", state 1603, "(1)"
+       line 268, "pan.___", state 1634, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1643, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1656, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1665, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1681, "(1)"
+       line 249, "pan.___", state 1689, "(1)"
+       line 253, "pan.___", state 1701, "(1)"
+       line 257, "pan.___", state 1709, "(1)"
+       line 1304, "pan.___", state 1725, "-end-"
+       (212 of 1725 states)
+unreached in proctype :init:
+       line 1319, "pan.___", state 13, "((i<1))"
+       line 1319, "pan.___", state 13, "((i>=1))"
+       (1 of 28 states)
+unreached in proctype :never:
+       line 1367, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 1.71e+04 seconds
+pan: rate 2490.8226 states/second
+pan: avg transition delay 1.5631e-06 usec
+cp .input.spin urcu_free_single_flip.spin.input
+cp .input.spin.trail urcu_free_single_flip.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.spin.input b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.spin.input
new file mode 100644 (file)
index 0000000..602bb6c
--- /dev/null
@@ -0,0 +1,1340 @@
+#define SINGLE_FLIP
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
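+/*
+ * Illustrative sketch (not part of the model): an instruction guarded by this
+ * token scheme consumes the tokens of the statements it depends on and
+ * produces its own token once it has executed. The token names STEP_A and
+ * STEP_B below are hypothetical one-hot bits, shown only to make the pattern
+ * used throughout this model explicit:
+ *
+ *   :: CONSUME_TOKENS(state, STEP_A, STEP_B) ->
+ *           body of the instruction depending on STEP_A;
+ *           PRODUCE_TOKENS(state, STEP_B);
+ *
+ * The guard fires only once STEP_A has been produced and as long as STEP_B
+ * has not, which both enforces the dependency and prevents re-execution.
+ */
+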
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
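+
+/*
+ * Illustrative sketch (not part of the model): for the hypothetical statement
+ * sequence
+ *
+ *   S1: y = x;
+ *   S2: x = 1;
+ *   S3: x = 2;
+ *   S4: z = x;
+ *
+ * S1 -> S2 is a WAR dependency on x (S2 overwrites what S1 read), S2 -> S3 is
+ * a WAW dependency on x (two writes to the same variable), and S3 -> S4 is a
+ * RAW dependency on x (S4 reads the value written by S3).
+ */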
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. See
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not perform the cache update (write-back to memory, or refill
+ * from memory), chosen nondeterministically.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
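+/*
+ * Illustrative usage sketch (not part of the model): "foo" is a hypothetical
+ * variable name, used only to make the macro roles explicit. Each variable has
+ * one memory copy (mem_foo) plus, per process, a cached copy (cached_foo) and
+ * a dirty bit (cache_dirty_foo):
+ *
+ *   DECLARE_CACHED_VAR(byte, foo);        declares mem_foo
+ *   DECLARE_PROC_CACHED_VAR(byte, foo);   declares cached_foo and cache_dirty_foo
+ *   INIT_CACHED_VAR(foo, 0);              initializes mem_foo
+ *   INIT_PROC_CACHED_VAR(foo, 0);         initializes the per-proc copy as clean
+ *   WRITE_CACHED_VAR(foo, 1);             updates the cache and marks it dirty
+ *   CACHE_WRITE_TO_MEM(foo, get_pid());   flushes a dirty value to mem_foo
+ *   CACHE_READ_FROM_MEM(foo, get_pid());  refills the cache only if it is clean
+ *   tmp = READ_CACHED_VAR(foo);           always reads the local cached copy
+ */
+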
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not needed there. The implementation keeps the
+                        * barrier unconditionally, because the performance impact of adding a
+                        * branch to skip it in the common case would not justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution into the next.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader. A sketch of how these tokens gate the
+ * writer's instructions follows the token definitions below.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
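+
+/*
+ * Sketch of the intended token flow (illustration only, mirroring the rules
+ * below): each writer step consumes the tokens of the steps it depends on
+ * and produces its own token. For instance, the data write depends only on
+ * WRITE_PROD_NONE and publishes WRITE_DATA:
+ *
+ *	:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROD_NONE, WRITE_DATA) ->
+ *		WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+ *		PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+ *
+ * The last alternative consumes WRITE_PROC_ALL_TOKENS and clears
+ * WRITE_PROC_ALL_TOKENS_CLEAR to re-arm the loop for the next iteration.
+ */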
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK not
+                * to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, with weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.spin.input.trail b/formal-model/urcu-controldataflow-alpha-ipi/urcu_free_single_flip.spin.input.trail
new file mode 100644 (file)
index 0000000..6a83ce1
--- /dev/null
@@ -0,0 +1,1442 @@
+-2:3:-2
+-4:-4:-4
+1:0:4651
+2:2:2896
+3:2:2901
+4:2:2905
+5:2:2913
+6:2:2917
+7:2:2921
+8:0:4651
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:4651
+16:3:4621
+17:3:4624
+18:3:4631
+19:3:4638
+20:3:4641
+21:3:4645
+22:3:4646
+23:0:4651
+24:3:4648
+25:0:4651
+26:2:2925
+27:0:4651
+28:2:2931
+29:0:4651
+30:2:2932
+31:0:4651
+32:2:2934
+33:0:4651
+34:2:2935
+35:0:4651
+36:2:2936
+37:0:4651
+38:2:2937
+39:0:4651
+40:2:2938
+41:0:4651
+42:2:2939
+43:0:4651
+44:2:2940
+45:2:2941
+46:2:2945
+47:2:2946
+48:2:2954
+49:2:2955
+50:2:2959
+51:2:2960
+52:2:2968
+53:2:2973
+54:2:2977
+55:2:2978
+56:2:2986
+57:2:2987
+58:2:2991
+59:2:2992
+60:2:2986
+61:2:2987
+62:2:2991
+63:2:2992
+64:2:3000
+65:2:3005
+66:2:3006
+67:2:3017
+68:2:3018
+69:2:3019
+70:2:3030
+71:2:3035
+72:2:3036
+73:2:3047
+74:2:3048
+75:2:3049
+76:2:3047
+77:2:3048
+78:2:3049
+79:2:3060
+80:2:3068
+81:0:4651
+82:2:2939
+83:0:4651
+84:2:3072
+85:2:3076
+86:2:3077
+87:2:3081
+88:2:3085
+89:2:3086
+90:2:3090
+91:2:3098
+92:2:3099
+93:2:3103
+94:2:3107
+95:2:3108
+96:2:3103
+97:2:3104
+98:2:3112
+99:0:4651
+100:2:2939
+101:0:4651
+102:2:3120
+103:2:3121
+104:2:3122
+105:0:4651
+106:2:2939
+107:0:4651
+108:2:3127
+109:0:4651
+110:2:3830
+111:2:3831
+112:2:3835
+113:2:3839
+114:2:3840
+115:2:3844
+116:2:3849
+117:2:3857
+118:2:3861
+119:2:3862
+120:2:3857
+121:2:3861
+122:2:3862
+123:2:3866
+124:2:3873
+125:2:3880
+126:2:3881
+127:2:3888
+128:2:3893
+129:2:3900
+130:2:3901
+131:2:3900
+132:2:3901
+133:2:3908
+134:2:3912
+135:0:4651
+136:2:3917
+137:0:4651
+138:2:3918
+139:0:4651
+140:2:3919
+141:0:4651
+142:2:3920
+143:0:4651
+144:1:29
+145:0:4651
+146:2:3921
+147:0:4651
+148:1:35
+149:0:4651
+150:1:36
+151:0:4651
+152:2:3920
+153:0:4651
+154:1:37
+155:0:4651
+156:2:3921
+157:0:4651
+158:1:38
+159:0:4651
+160:2:3920
+161:0:4651
+162:1:39
+163:0:4651
+164:2:3921
+165:0:4651
+166:1:40
+167:0:4651
+168:2:3920
+169:0:4651
+170:1:41
+171:0:4651
+172:2:3921
+173:0:4651
+174:1:42
+175:0:4651
+176:1:43
+177:0:4651
+178:2:3920
+179:0:4651
+180:1:44
+181:0:4651
+182:2:3921
+183:0:4651
+184:1:53
+185:0:4651
+186:2:3920
+187:0:4651
+188:1:57
+189:1:58
+190:1:62
+191:1:66
+192:1:67
+193:1:71
+194:1:79
+195:1:80
+196:1:84
+197:1:88
+198:1:89
+199:1:84
+200:1:88
+201:1:89
+202:1:93
+203:1:100
+204:1:107
+205:1:108
+206:1:115
+207:1:120
+208:1:127
+209:1:128
+210:1:127
+211:1:128
+212:1:135
+213:1:139
+214:0:4651
+215:2:3921
+216:0:4651
+217:1:144
+218:0:4651
+219:2:3922
+220:0:4651
+221:2:3927
+222:0:4651
+223:2:3928
+224:0:4651
+225:2:3936
+226:2:3937
+227:2:3941
+228:2:3945
+229:2:3946
+230:2:3950
+231:2:3958
+232:2:3959
+233:2:3963
+234:2:3967
+235:2:3968
+236:2:3963
+237:2:3967
+238:2:3968
+239:2:3972
+240:2:3979
+241:2:3986
+242:2:3987
+243:2:3994
+244:2:3999
+245:2:4006
+246:2:4007
+247:2:4006
+248:2:4007
+249:2:4014
+250:2:4018
+251:0:4651
+252:2:3129
+253:2:3811
+254:0:4651
+255:2:2939
+256:0:4651
+257:2:3130
+258:0:4651
+259:2:2939
+260:0:4651
+261:2:3133
+262:2:3134
+263:2:3138
+264:2:3139
+265:2:3147
+266:2:3148
+267:2:3152
+268:2:3153
+269:2:3161
+270:2:3166
+271:2:3170
+272:2:3171
+273:2:3179
+274:2:3180
+275:2:3184
+276:2:3185
+277:2:3179
+278:2:3180
+279:2:3184
+280:2:3185
+281:2:3193
+282:2:3198
+283:2:3199
+284:2:3210
+285:2:3211
+286:2:3212
+287:2:3223
+288:2:3228
+289:2:3229
+290:2:3240
+291:2:3241
+292:2:3242
+293:2:3240
+294:2:3241
+295:2:3242
+296:2:3253
+297:2:3260
+298:0:4651
+299:2:2939
+300:0:4651
+301:2:3264
+302:2:3265
+303:2:3266
+304:2:3278
+305:2:3279
+306:2:3283
+307:2:3284
+308:2:3292
+309:2:3297
+310:2:3301
+311:2:3302
+312:2:3310
+313:2:3311
+314:2:3315
+315:2:3316
+316:2:3310
+317:2:3311
+318:2:3315
+319:2:3316
+320:2:3324
+321:2:3329
+322:2:3330
+323:2:3341
+324:2:3342
+325:2:3343
+326:2:3354
+327:2:3359
+328:2:3360
+329:2:3371
+330:2:3372
+331:2:3373
+332:2:3371
+333:2:3372
+334:2:3373
+335:2:3384
+336:2:3394
+337:2:3395
+338:0:4651
+339:2:2939
+340:0:4651
+341:2:3799
+342:0:4651
+343:2:4424
+344:2:4425
+345:2:4429
+346:2:4433
+347:2:4434
+348:2:4438
+349:2:4446
+350:2:4447
+351:2:4451
+352:2:4455
+353:2:4456
+354:2:4451
+355:2:4455
+356:2:4456
+357:2:4460
+358:2:4467
+359:2:4474
+360:2:4475
+361:2:4482
+362:2:4487
+363:2:4494
+364:2:4495
+365:2:4494
+366:2:4495
+367:2:4502
+368:2:4506
+369:0:4651
+370:2:4511
+371:0:4651
+372:2:4512
+373:0:4651
+374:2:4513
+375:0:4651
+376:2:4514
+377:0:4651
+378:1:145
+379:0:4651
+380:2:4515
+381:0:4651
+382:1:147
+383:0:4651
+384:2:4514
+385:0:4651
+386:1:46
+387:0:4651
+388:2:4515
+389:0:4651
+390:1:153
+391:1:154
+392:1:158
+393:1:159
+394:1:167
+395:1:168
+396:1:172
+397:1:173
+398:1:181
+399:1:186
+400:1:190
+401:1:191
+402:1:199
+403:1:200
+404:1:204
+405:1:205
+406:1:199
+407:1:200
+408:1:204
+409:1:205
+410:1:213
+411:1:225
+412:1:226
+413:1:230
+414:1:231
+415:1:232
+416:1:243
+417:1:248
+418:1:249
+419:1:260
+420:1:261
+421:1:262
+422:1:260
+423:1:261
+424:1:262
+425:1:273
+426:0:4651
+427:1:42
+428:0:4651
+429:1:43
+430:0:4651
+431:2:4514
+432:0:4651
+433:1:44
+434:0:4651
+435:2:4515
+436:0:4651
+437:1:145
+438:0:4651
+439:1:147
+440:0:4651
+441:2:4514
+442:0:4651
+443:1:46
+444:0:4651
+445:2:4515
+446:0:4651
+447:1:282
+448:1:283
+449:0:4651
+450:1:42
+451:0:4651
+452:1:43
+453:0:4651
+454:2:4514
+455:0:4651
+456:1:44
+457:0:4651
+458:2:4515
+459:0:4651
+460:1:145
+461:0:4651
+462:1:147
+463:0:4651
+464:2:4514
+465:0:4651
+466:1:46
+467:0:4651
+468:2:4515
+469:0:4651
+470:1:289
+471:1:290
+472:1:294
+473:1:295
+474:1:303
+475:1:304
+476:1:308
+477:1:309
+478:1:317
+479:1:322
+480:1:326
+481:1:327
+482:1:335
+483:1:336
+484:1:340
+485:1:341
+486:1:335
+487:1:336
+488:1:340
+489:1:341
+490:1:349
+491:1:361
+492:1:362
+493:1:366
+494:1:367
+495:1:368
+496:1:379
+497:1:384
+498:1:385
+499:1:396
+500:1:397
+501:1:398
+502:1:396
+503:1:397
+504:1:398
+505:1:409
+506:0:4651
+507:1:42
+508:0:4651
+509:1:43
+510:0:4651
+511:2:4514
+512:0:4651
+513:1:44
+514:0:4651
+515:2:4515
+516:0:4651
+517:1:53
+518:0:4651
+519:2:4514
+520:0:4651
+521:1:57
+522:1:58
+523:1:62
+524:1:66
+525:1:67
+526:1:71
+527:1:79
+528:1:80
+529:1:84
+530:1:88
+531:1:89
+532:1:84
+533:1:88
+534:1:89
+535:1:93
+536:1:100
+537:1:107
+538:1:108
+539:1:115
+540:1:120
+541:1:127
+542:1:128
+543:1:127
+544:1:128
+545:1:135
+546:1:139
+547:0:4651
+548:2:4515
+549:0:4651
+550:1:144
+551:0:4651
+552:2:4516
+553:0:4651
+554:2:4521
+555:0:4651
+556:2:4522
+557:0:4651
+558:2:4530
+559:2:4531
+560:2:4535
+561:2:4539
+562:2:4540
+563:2:4544
+564:2:4552
+565:2:4553
+566:2:4557
+567:2:4561
+568:2:4562
+569:2:4557
+570:2:4561
+571:2:4562
+572:2:4566
+573:2:4573
+574:2:4580
+575:2:4581
+576:2:4588
+577:2:4593
+578:2:4600
+579:2:4601
+580:2:4600
+581:2:4601
+582:2:4608
+583:2:4612
+584:0:4651
+585:2:3801
+586:2:3811
+587:0:4651
+588:2:2939
+589:0:4651
+590:2:3802
+591:2:3803
+592:0:4651
+593:2:2939
+594:0:4651
+595:2:3807
+596:0:4651
+597:2:3815
+598:0:4651
+599:2:2932
+600:0:4651
+601:2:2934
+602:0:4651
+603:2:2935
+604:0:4651
+605:2:2936
+606:0:4651
+607:2:2937
+608:0:4651
+609:2:2938
+610:0:4651
+611:2:2939
+612:0:4651
+613:2:2940
+614:2:2941
+615:2:2945
+616:2:2946
+617:2:2954
+618:2:2955
+619:2:2959
+620:2:2960
+621:2:2968
+622:2:2973
+623:2:2977
+624:2:2978
+625:2:2986
+626:2:2987
+627:2:2988
+628:2:2986
+629:2:2987
+630:2:2991
+631:2:2992
+632:2:3000
+633:2:3005
+634:2:3006
+635:2:3017
+636:2:3018
+637:2:3019
+638:2:3030
+639:2:3035
+640:2:3036
+641:2:3047
+642:2:3048
+643:2:3049
+644:2:3047
+645:2:3048
+646:2:3049
+647:2:3060
+648:2:3068
+649:0:4651
+650:2:2939
+651:0:4651
+652:2:3072
+653:2:3076
+654:2:3077
+655:2:3081
+656:2:3085
+657:2:3086
+658:2:3090
+659:2:3098
+660:2:3099
+661:2:3103
+662:2:3104
+663:2:3103
+664:2:3107
+665:2:3108
+666:2:3112
+667:0:4651
+668:2:2939
+669:0:4651
+670:2:3120
+671:2:3121
+672:2:3122
+673:0:4651
+674:2:2939
+675:0:4651
+676:2:3127
+677:0:4651
+678:2:3830
+679:2:3831
+680:2:3835
+681:2:3839
+682:2:3840
+683:2:3844
+684:2:3849
+685:2:3857
+686:2:3861
+687:2:3862
+688:2:3857
+689:2:3861
+690:2:3862
+691:2:3866
+692:2:3873
+693:2:3880
+694:2:3881
+695:2:3888
+696:2:3893
+697:2:3900
+698:2:3901
+699:2:3900
+700:2:3901
+701:2:3908
+702:2:3912
+703:0:4651
+704:2:3917
+705:0:4651
+706:2:3918
+707:0:4651
+708:2:3919
+709:0:4651
+710:2:3920
+711:0:4651
+712:1:145
+713:0:4651
+714:2:3921
+715:0:4651
+716:1:147
+717:0:4651
+718:2:3920
+719:0:4651
+720:1:46
+721:0:4651
+722:2:3921
+723:0:4651
+724:1:418
+725:1:419
+726:1:423
+727:1:424
+728:1:432
+729:1:433
+730:1:437
+731:1:438
+732:1:446
+733:1:451
+734:1:455
+735:1:456
+736:1:464
+737:1:465
+738:1:469
+739:1:470
+740:1:464
+741:1:465
+742:1:469
+743:1:470
+744:1:478
+745:1:483
+746:1:484
+747:1:495
+748:1:496
+749:1:497
+750:1:508
+751:1:520
+752:1:521
+753:1:525
+754:1:526
+755:1:527
+756:1:525
+757:1:526
+758:1:527
+759:1:538
+760:1:545
+761:0:4651
+762:1:42
+763:0:4651
+764:1:43
+765:0:4651
+766:2:3920
+767:0:4651
+768:1:44
+769:0:4651
+770:2:3921
+771:0:4651
+772:1:145
+773:0:4651
+774:1:147
+775:0:4651
+776:2:3920
+777:0:4651
+778:1:46
+779:0:4651
+780:2:3921
+781:0:4651
+782:1:683
+783:1:684
+784:1:688
+785:1:689
+786:1:697
+787:1:698
+788:1:699
+789:1:711
+790:1:716
+791:1:720
+792:1:721
+793:1:729
+794:1:730
+795:1:734
+796:1:735
+797:1:729
+798:1:730
+799:1:734
+800:1:735
+801:1:743
+802:1:748
+803:1:749
+804:1:760
+805:1:761
+806:1:762
+807:1:773
+808:1:785
+809:1:786
+810:1:790
+811:1:791
+812:1:792
+813:1:790
+814:1:791
+815:1:792
+816:1:803
+817:0:4651
+818:1:42
+819:0:4651
+820:1:43
+821:0:4651
+822:2:3920
+823:0:4651
+824:1:44
+825:0:4651
+826:2:3921
+827:0:4651
+828:1:145
+829:0:4651
+830:1:147
+831:0:4651
+832:2:3920
+833:0:4651
+834:1:46
+835:0:4651
+836:2:3921
+837:0:4651
+838:1:812
+839:1:815
+840:1:816
+841:0:4651
+842:1:42
+843:0:4651
+844:1:43
+845:0:4651
+846:2:3920
+847:0:4651
+848:1:44
+849:0:4651
+850:2:3921
+851:0:4651
+852:1:145
+853:0:4651
+854:1:147
+855:0:4651
+856:2:3920
+857:0:4651
+858:1:46
+859:0:4651
+860:2:3921
+861:0:4651
+862:1:819
+863:1:820
+864:1:824
+865:1:825
+866:1:833
+867:1:834
+868:1:838
+869:1:839
+870:1:847
+871:1:852
+872:1:856
+873:1:857
+874:1:865
+875:1:866
+876:1:870
+877:1:871
+878:1:865
+879:1:866
+880:1:870
+881:1:871
+882:1:879
+883:1:884
+884:1:885
+885:1:896
+886:1:897
+887:1:898
+888:1:909
+889:1:921
+890:1:922
+891:1:926
+892:1:927
+893:1:928
+894:1:926
+895:1:927
+896:1:928
+897:1:939
+898:0:4651
+899:1:42
+900:0:4651
+901:1:43
+902:0:4651
+903:2:3920
+904:0:4651
+905:1:44
+906:0:4651
+907:2:3921
+908:0:4651
+909:1:145
+910:0:4651
+911:1:147
+912:0:4651
+913:2:3920
+914:0:4651
+915:1:46
+916:0:4651
+917:2:3921
+918:0:4651
+919:1:1079
+920:1:1080
+921:1:1084
+922:1:1085
+923:1:1093
+924:1:1094
+925:1:1098
+926:1:1099
+927:1:1107
+928:1:1112
+929:1:1116
+930:1:1117
+931:1:1125
+932:1:1126
+933:1:1130
+934:1:1131
+935:1:1125
+936:1:1126
+937:1:1130
+938:1:1131
+939:1:1139
+940:1:1144
+941:1:1145
+942:1:1156
+943:1:1157
+944:1:1158
+945:1:1169
+946:1:1181
+947:1:1182
+948:1:1186
+949:1:1187
+950:1:1188
+951:1:1186
+952:1:1187
+953:1:1188
+954:1:1199
+955:1:1206
+956:1:1210
+957:0:4651
+958:1:42
+959:0:4651
+960:1:43
+961:0:4651
+962:2:3920
+963:0:4651
+964:1:44
+965:0:4651
+966:2:3921
+967:0:4651
+968:1:145
+969:0:4651
+970:1:147
+971:0:4651
+972:2:3920
+973:0:4651
+974:1:46
+975:0:4651
+976:2:3921
+977:0:4651
+978:1:1211
+979:1:1212
+980:1:1216
+981:1:1217
+982:1:1225
+983:1:1226
+984:1:1227
+985:1:1239
+986:1:1244
+987:1:1248
+988:1:1249
+989:1:1257
+990:1:1258
+991:1:1262
+992:1:1263
+993:1:1257
+994:1:1258
+995:1:1262
+996:1:1263
+997:1:1271
+998:1:1276
+999:1:1277
+1000:1:1288
+1001:1:1289
+1002:1:1290
+1003:1:1301
+1004:1:1313
+1005:1:1314
+1006:1:1318
+1007:1:1319
+1008:1:1320
+1009:1:1318
+1010:1:1319
+1011:1:1320
+1012:1:1331
+1013:0:4651
+1014:1:42
+1015:0:4651
+1016:1:43
+1017:0:4651
+1018:2:3920
+1019:0:4651
+1020:1:44
+1021:0:4651
+1022:2:3921
+1023:0:4651
+1024:1:53
+1025:0:4651
+1026:2:3920
+1027:0:4651
+1028:1:57
+1029:1:58
+1030:1:62
+1031:1:66
+1032:1:67
+1033:1:71
+1034:1:79
+1035:1:80
+1036:1:84
+1037:1:88
+1038:1:89
+1039:1:84
+1040:1:88
+1041:1:89
+1042:1:93
+1043:1:100
+1044:1:107
+1045:1:108
+1046:1:115
+1047:1:120
+1048:1:127
+1049:1:128
+1050:1:127
+1051:1:128
+1052:1:135
+1053:1:139
+1054:0:4651
+1055:2:3921
+1056:0:4651
+1057:1:144
+1058:0:4651
+1059:2:3922
+1060:0:4651
+1061:2:3927
+1062:0:4651
+1063:2:3928
+1064:0:4651
+1065:2:3936
+1066:2:3937
+1067:2:3941
+1068:2:3945
+1069:2:3946
+1070:2:3950
+1071:2:3958
+1072:2:3959
+1073:2:3963
+1074:2:3967
+1075:2:3968
+1076:2:3963
+1077:2:3967
+1078:2:3968
+1079:2:3972
+1080:2:3979
+1081:2:3986
+1082:2:3987
+1083:2:3994
+1084:2:3999
+1085:2:4006
+1086:2:4007
+1087:2:4006
+1088:2:4007
+1089:2:4014
+1090:2:4018
+1091:0:4651
+1092:2:3129
+1093:2:3811
+1094:0:4651
+1095:2:2939
+1096:0:4651
+1097:2:3130
+1098:0:4651
+1099:2:2939
+1100:0:4651
+1101:2:3133
+1102:2:3134
+1103:2:3138
+1104:2:3139
+1105:2:3147
+1106:2:3148
+1107:2:3152
+1108:2:3153
+1109:2:3161
+1110:2:3166
+1111:2:3170
+1112:2:3171
+1113:2:3179
+1114:2:3180
+1115:2:3184
+1116:2:3185
+1117:2:3179
+1118:2:3180
+1119:2:3184
+1120:2:3185
+1121:2:3193
+1122:2:3198
+1123:2:3199
+1124:2:3210
+1125:2:3211
+1126:2:3212
+1127:2:3223
+1128:2:3228
+1129:2:3229
+1130:2:3240
+1131:2:3241
+1132:2:3242
+1133:2:3240
+1134:2:3241
+1135:2:3242
+1136:2:3253
+1137:2:3260
+1138:0:4651
+1139:2:2939
+1140:0:4651
+1141:2:3264
+1142:2:3265
+1143:2:3266
+1144:2:3278
+1145:2:3279
+1146:2:3283
+1147:2:3284
+1148:2:3292
+1149:2:3297
+1150:2:3301
+1151:2:3302
+1152:2:3310
+1153:2:3311
+1154:2:3315
+1155:2:3316
+1156:2:3310
+1157:2:3311
+1158:2:3315
+1159:2:3316
+1160:2:3324
+1161:2:3329
+1162:2:3330
+1163:2:3341
+1164:2:3342
+1165:2:3343
+1166:2:3354
+1167:2:3359
+1168:2:3360
+1169:2:3371
+1170:2:3372
+1171:2:3373
+1172:2:3371
+1173:2:3372
+1174:2:3373
+1175:2:3384
+1176:2:3394
+1177:2:3395
+1178:0:4651
+1179:2:2939
+1180:0:4651
+1181:2:3799
+1182:0:4651
+1183:2:4424
+1184:2:4425
+1185:2:4429
+1186:2:4433
+1187:2:4434
+1188:2:4438
+1189:2:4446
+1190:2:4447
+1191:2:4451
+1192:2:4455
+1193:2:4456
+1194:2:4451
+1195:2:4455
+1196:2:4456
+1197:2:4460
+1198:2:4467
+1199:2:4474
+1200:2:4475
+1201:2:4482
+1202:2:4487
+1203:2:4494
+1204:2:4495
+1205:2:4494
+1206:2:4495
+1207:2:4502
+1208:2:4506
+1209:0:4651
+1210:2:4511
+1211:0:4651
+1212:2:4512
+1213:0:4651
+1214:2:4513
+1215:0:4651
+1216:2:4514
+1217:0:4651
+1218:1:53
+1219:0:4651
+1220:2:4515
+1221:0:4651
+1222:1:57
+1223:1:58
+1224:1:62
+1225:1:66
+1226:1:67
+1227:1:71
+1228:1:79
+1229:1:80
+1230:1:84
+1231:1:88
+1232:1:89
+1233:1:84
+1234:1:88
+1235:1:89
+1236:1:93
+1237:1:100
+1238:1:107
+1239:1:108
+1240:1:115
+1241:1:120
+1242:1:127
+1243:1:128
+1244:1:127
+1245:1:128
+1246:1:135
+1247:1:139
+1248:0:4651
+1249:2:4514
+1250:0:4651
+1251:1:144
+1252:0:4651
+1253:2:4515
+1254:0:4651
+1255:2:4516
+1256:0:4651
+1257:2:4521
+1258:0:4651
+1259:2:4522
+1260:0:4651
+1261:2:4530
+1262:2:4531
+1263:2:4535
+1264:2:4539
+1265:2:4540
+1266:2:4544
+1267:2:4552
+1268:2:4553
+1269:2:4557
+1270:2:4561
+1271:2:4562
+1272:2:4557
+1273:2:4561
+1274:2:4562
+1275:2:4566
+1276:2:4573
+1277:2:4580
+1278:2:4581
+1279:2:4588
+1280:2:4593
+1281:2:4600
+1282:2:4601
+1283:2:4600
+1284:2:4601
+1285:2:4608
+1286:2:4612
+1287:0:4651
+1288:2:3801
+1289:2:3811
+1290:0:4651
+1291:2:2939
+1292:0:4651
+1293:2:3802
+1294:2:3803
+1295:0:4651
+1296:2:2939
+1297:0:4651
+1298:2:3807
+1299:0:4651
+1300:2:3815
+1301:0:4651
+1302:2:2932
+1303:0:4651
+1304:2:2934
+1305:0:4651
+1306:2:2935
+1307:0:4651
+1308:2:2936
+1309:0:4651
+1310:2:2937
+1311:0:4651
+1312:2:2938
+1313:0:4651
+1314:2:2939
+1315:0:4651
+1316:2:2940
+1317:2:2941
+1318:2:2945
+1319:2:2946
+1320:2:2954
+1321:2:2955
+1322:2:2959
+1323:2:2960
+1324:2:2968
+1325:2:2973
+1326:2:2977
+1327:2:2978
+1328:2:2986
+1329:2:2987
+1330:2:2991
+1331:2:2992
+1332:2:2986
+1333:2:2987
+1334:2:2988
+1335:2:3000
+1336:2:3005
+1337:2:3006
+1338:2:3017
+1339:2:3018
+1340:2:3019
+1341:2:3030
+1342:2:3035
+1343:2:3036
+1344:2:3047
+1345:2:3048
+1346:2:3049
+1347:2:3047
+1348:2:3048
+1349:2:3049
+1350:2:3060
+1351:2:3068
+1352:0:4651
+1353:2:2939
+1354:0:4651
+1355:1:145
+1356:0:4651
+1357:1:147
+1358:0:4651
+1359:1:46
+1360:0:4651
+1361:1:1340
+1362:0:4651
+1363:1:2804
+1364:1:2811
+1365:1:2812
+1366:1:2819
+1367:1:2824
+1368:1:2831
+1369:1:2832
+1370:1:2831
+1371:1:2832
+1372:1:2839
+1373:1:2843
+1374:0:4651
+1375:2:3072
+1376:2:3076
+1377:2:3077
+1378:2:3081
+1379:2:3085
+1380:2:3086
+1381:2:3090
+1382:2:3098
+1383:2:3099
+1384:2:3103
+1385:2:3107
+1386:2:3108
+1387:2:3103
+1388:2:3104
+1389:2:3112
+1390:0:4651
+1391:2:2939
+1392:0:4651
+1393:2:3120
+1394:2:3121
+1395:2:3122
+1396:0:4651
+1397:2:2939
+1398:0:4651
+1399:2:3127
+1400:0:4651
+1401:2:3830
+1402:2:3831
+1403:2:3835
+1404:2:3839
+1405:2:3840
+1406:2:3844
+1407:2:3849
+1408:2:3857
+1409:2:3861
+1410:2:3862
+1411:2:3857
+1412:2:3861
+1413:2:3862
+1414:2:3866
+1415:2:3873
+1416:2:3880
+1417:2:3881
+1418:2:3888
+1419:2:3893
+1420:2:3900
+1421:2:3901
+1422:2:3900
+1423:2:3901
+1424:2:3908
+1425:2:3912
+1426:0:4651
+1427:2:3917
+1428:0:4651
+1429:2:3918
+1430:0:4651
+1431:2:3919
+1432:0:4651
+1433:2:3920
+1434:0:4651
+1435:1:1342
+1436:1:1343
+1437:0:4649
+1438:2:3921
+1439:0:4655
+1440:1:2458
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress.ltl b/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_reader.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_writer.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-alpha-ipi/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/.input.spin b/formal-model/urcu-controldataflow-alpha-no-ipi/.input.spin
new file mode 100644 (file)
index 0000000..b59aa77
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Uses one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active inhibits instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ * A usage sketch follows the macro definitions below.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
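+
+/*
+ * Usage sketch (hypothetical token state "proc_state" and tokens TOK_IN,
+ * TOK_OUT): an instruction is enabled only once all of its input tokens have
+ * been produced and its own output token has not been produced yet; it then
+ * publishes its output token:
+ *
+ *	:: CONSUME_TOKENS(proc_state, TOK_IN, TOK_OUT) ->
+ *		... instruction body ...
+ *		PRODUCE_TOKENS(proc_state, TOK_OUT);
+ *
+ * CLEAR_TOKENS() is used at the end of a loop body to re-enable instructions
+ * for the next iteration.
+ */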
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but the dependency remains when the
+ * statements must write multiple times to the same OOO mem model variable.
+ * (A small illustration of these three data dependencies follows this
+ * comment block.)
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with the arcs of the data flow inverted, and input
+ * vs output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. However, they can
+ * still be reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
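+/*
+ * Hypothetical examples of the dependency types above, written with the cached
+ * variable macros defined further down in this model (variable names x and y
+ * are illustrative only):
+ *
+ *	RAW : WRITE_CACHED_VAR(x, v);		tmp = READ_CACHED_VAR(x);
+ *	WAR : tmp = READ_CACHED_VAR(x);		WRITE_CACHED_VAR(x, v);
+ *	WAW : WRITE_CACHED_VAR(x, v1);		WRITE_CACHED_VAR(x, v2);
+ *
+ * In the token scheme, the second statement of each pair would list the first
+ * statement's token among its CONSUME_TOKENS inputs.
+ */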
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Non-deterministically flush the local cache line to memory if it is dirty,
+ * or do nothing. Models random cache write-back, which may make updates
+ * visible to other caches.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
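+/*
+ * Sketch of how the cache model macros are meant to be used, with a
+ * hypothetical variable "foo" (not part of this model):
+ *
+ *	DECLARE_CACHED_VAR(byte, foo);
+ *	INIT_CACHED_VAR(foo, 0, j);		// in init, inside an atomic block
+ *	WRITE_CACHED_VAR(foo, 1);		// writes the local cache, marks it dirty
+ *	tmp = READ_CACHED_VAR(foo);		// reads the local cache only
+ *	CACHE_WRITE_TO_MEM(foo, get_pid());	// flushes to memory if dirty
+ *	CACHE_READ_FROM_MEM(foo, get_pid());	// refreshes from memory if not dirty
+ */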
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops
+                * waiting for the reader and keeps sending barrier requests,
+                * while the reader always services them without continuing
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note! Currently only one reader. */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
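+/*
+ * Illustrative decoding of the bit layout above: the first PROCEDURE_READ_LOCK
+ * instance uses READ_LOCK_BASE = 1, so its internal tokens (READ_PROD_A_READ,
+ * READ_PROD_B_IF_TRUE, READ_PROD_B_IF_FALSE, READ_PROD_C_IF_TRUE_READ) occupy
+ * bits 1 to 4 and its output token READ_LOCK_OUT is bit 5. The nested instance
+ * shifts the same pattern to bits 7 to 10, with READ_LOCK_NESTED_OUT at bit 11.
+ */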
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because skipping it would require a branch, whose performance
+                        * impact in the common case does not justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one loop's
+        * execution from spilling into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
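+/*
+ * Rough order of the writer tokens above (illustrative summary only; wait
+ * loops and branches are omitted) : WRITE_DATA -> WRITE_PROC_WMB ->
+ * WRITE_XCHG_PTR -> WRITE_PROC_FIRST_MB -> first flip (WRITE_PROC_FIRST_READ_GP,
+ * WRITE_PROC_FIRST_WRITE_GP, WRITE_PROC_FIRST_WAIT) -> second flip
+ * (WRITE_PROC_SECOND_READ_GP, WRITE_PROC_SECOND_WRITE_GP,
+ * WRITE_PROC_SECOND_WAIT) -> WRITE_PROC_SECOND_MB -> WRITE_FREE.
+ */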
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep init after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/DEFINES b/formal-model/urcu-controldataflow-alpha-no-ipi/DEFINES
new file mode 100644 (file)
index 0000000..a1008a6
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/Makefile b/formal-model/urcu-controldataflow-alpha-no-ipi/Makefile
new file mode 100644 (file)
index 0000000..de47dff
--- /dev/null
@@ -0,0 +1,170 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       make urcu_free | tee urcu_free.log
+       make urcu_free_no_mb | tee urcu_free_no_mb.log
+       make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       make asserts | tee asserts.log
+       make summary
+
+#show trail : spin -v -t -N pan.ltl input.spin
+# after each individual make.
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/asserts.log b/formal-model/urcu-controldataflow-alpha-no-ipi/asserts.log
new file mode 100644 (file)
index 0000000..9a18131
--- /dev/null
@@ -0,0 +1,468 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+cat DEFINES > .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w20
+Depth=    4773 States=    1e+06 Transitions= 6.22e+08 Memory=   542.717        t=    624 R=   2e+03
+Depth=    5040 States=    2e+06 Transitions=  1.3e+09 Memory=   618.986        t= 1.33e+03 R=   1e+03
+Depth=    5040 States=    3e+06 Transitions= 1.95e+09 Memory=   695.256        t= 2.03e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=    5040 States=    4e+06 Transitions= 2.64e+09 Memory=   802.647        t= 2.73e+03 R=   1e+03
+Depth=    5040 States=    5e+06 Transitions=  3.3e+09 Memory=   878.916        t= 3.41e+03 R=   1e+03
+Depth=    5141 States=    6e+06 Transitions= 3.99e+09 Memory=   955.186        t= 4.12e+03 R=   1e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             - (none specified)
+       assertion violations    +
+       cycle checks            - (disabled by -DSAFETY)
+       invalid end states      +
+
+State-vector 72 byte, depth reached 5141, errors: 0
+  6711104 states, stored
+4.4393201e+09 states, matched
+4.4460312e+09 transitions (= stored+matched)
+2.5322962e+10 atomic steps
+hash conflicts: 3.3332015e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+  640.021      equivalent memory usage for states (stored*(State-vector + overhead))
+  519.783      actual memory usage for states (compression: 81.21%)
+               state-vector as stored = 53 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+ 1009.483      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 410, ".input.spin", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 82, "(1)"
+       line 437, ".input.spin", state 112, "(1)"
+       line 441, ".input.spin", state 125, "(1)"
+       line 596, ".input.spin", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 410, ".input.spin", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 218, "(1)"
+       line 437, ".input.spin", state 248, "(1)"
+       line 441, ".input.spin", state 261, "(1)"
+       line 410, ".input.spin", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 347, "(1)"
+       line 437, ".input.spin", state 377, "(1)"
+       line 441, ".input.spin", state 390, "(1)"
+       line 410, ".input.spin", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 415, "(1)"
+       line 410, ".input.spin", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, ".input.spin", state 416, "else"
+       line 410, ".input.spin", state 419, "(1)"
+       line 414, ".input.spin", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 429, "(1)"
+       line 414, ".input.spin", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, ".input.spin", state 430, "else"
+       line 414, ".input.spin", state 433, "(1)"
+       line 414, ".input.spin", state 434, "(1)"
+       line 414, ".input.spin", state 434, "(1)"
+       line 412, ".input.spin", state 439, "((i<1))"
+       line 412, ".input.spin", state 439, "((i>=1))"
+       line 419, ".input.spin", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 447, "(1)"
+       line 419, ".input.spin", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, ".input.spin", state 448, "else"
+       line 419, ".input.spin", state 451, "(1)"
+       line 419, ".input.spin", state 452, "(1)"
+       line 419, ".input.spin", state 452, "(1)"
+       line 423, ".input.spin", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 461, "(1)"
+       line 423, ".input.spin", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, ".input.spin", state 462, "else"
+       line 423, ".input.spin", state 465, "(1)"
+       line 423, ".input.spin", state 466, "(1)"
+       line 423, ".input.spin", state 466, "(1)"
+       line 421, ".input.spin", state 471, "((i<2))"
+       line 421, ".input.spin", state 471, "((i>=2))"
+       line 428, ".input.spin", state 478, "(1)"
+       line 428, ".input.spin", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 428, ".input.spin", state 479, "else"
+       line 428, ".input.spin", state 482, "(1)"
+       line 428, ".input.spin", state 483, "(1)"
+       line 428, ".input.spin", state 483, "(1)"
+       line 432, ".input.spin", state 491, "(1)"
+       line 432, ".input.spin", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 432, ".input.spin", state 492, "else"
+       line 432, ".input.spin", state 495, "(1)"
+       line 432, ".input.spin", state 496, "(1)"
+       line 432, ".input.spin", state 496, "(1)"
+       line 430, ".input.spin", state 501, "((i<1))"
+       line 430, ".input.spin", state 501, "((i>=1))"
+       line 437, ".input.spin", state 508, "(1)"
+       line 437, ".input.spin", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 437, ".input.spin", state 509, "else"
+       line 437, ".input.spin", state 512, "(1)"
+       line 437, ".input.spin", state 513, "(1)"
+       line 437, ".input.spin", state 513, "(1)"
+       line 441, ".input.spin", state 521, "(1)"
+       line 441, ".input.spin", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 441, ".input.spin", state 522, "else"
+       line 441, ".input.spin", state 525, "(1)"
+       line 441, ".input.spin", state 526, "(1)"
+       line 441, ".input.spin", state 526, "(1)"
+       line 439, ".input.spin", state 531, "((i<2))"
+       line 439, ".input.spin", state 531, "((i>=2))"
+       line 449, ".input.spin", state 535, "(1)"
+       line 449, ".input.spin", state 535, "(1)"
+       line 596, ".input.spin", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 596, ".input.spin", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 596, ".input.spin", state 540, "(1)"
+       line 271, ".input.spin", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 555, "(1)"
+       line 279, ".input.spin", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 591, "(1)"
+       line 252, ".input.spin", state 599, "(1)"
+       line 256, ".input.spin", state 611, "(1)"
+       line 260, ".input.spin", state 619, "(1)"
+       line 410, ".input.spin", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 702, "(1)"
+       line 432, ".input.spin", state 715, "(1)"
+       line 437, ".input.spin", state 732, "(1)"
+       line 441, ".input.spin", state 745, "(1)"
+       line 410, ".input.spin", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 838, "(1)"
+       line 437, ".input.spin", state 868, "(1)"
+       line 441, ".input.spin", state 881, "(1)"
+       line 410, ".input.spin", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 904, "(1)"
+       line 410, ".input.spin", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, ".input.spin", state 905, "else"
+       line 410, ".input.spin", state 908, "(1)"
+       line 414, ".input.spin", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 918, "(1)"
+       line 414, ".input.spin", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, ".input.spin", state 919, "else"
+       line 414, ".input.spin", state 922, "(1)"
+       line 414, ".input.spin", state 923, "(1)"
+       line 414, ".input.spin", state 923, "(1)"
+       line 412, ".input.spin", state 928, "((i<1))"
+       line 412, ".input.spin", state 928, "((i>=1))"
+       line 419, ".input.spin", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 936, "(1)"
+       line 419, ".input.spin", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, ".input.spin", state 937, "else"
+       line 419, ".input.spin", state 940, "(1)"
+       line 419, ".input.spin", state 941, "(1)"
+       line 419, ".input.spin", state 941, "(1)"
+       line 423, ".input.spin", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 950, "(1)"
+       line 423, ".input.spin", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, ".input.spin", state 951, "else"
+       line 423, ".input.spin", state 954, "(1)"
+       line 423, ".input.spin", state 955, "(1)"
+       line 423, ".input.spin", state 955, "(1)"
+       line 421, ".input.spin", state 960, "((i<2))"
+       line 421, ".input.spin", state 960, "((i>=2))"
+       line 428, ".input.spin", state 967, "(1)"
+       line 428, ".input.spin", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 428, ".input.spin", state 968, "else"
+       line 428, ".input.spin", state 971, "(1)"
+       line 428, ".input.spin", state 972, "(1)"
+       line 428, ".input.spin", state 972, "(1)"
+       line 432, ".input.spin", state 980, "(1)"
+       line 432, ".input.spin", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 432, ".input.spin", state 981, "else"
+       line 432, ".input.spin", state 984, "(1)"
+       line 432, ".input.spin", state 985, "(1)"
+       line 432, ".input.spin", state 985, "(1)"
+       line 430, ".input.spin", state 990, "((i<1))"
+       line 430, ".input.spin", state 990, "((i>=1))"
+       line 437, ".input.spin", state 997, "(1)"
+       line 437, ".input.spin", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 437, ".input.spin", state 998, "else"
+       line 437, ".input.spin", state 1001, "(1)"
+       line 437, ".input.spin", state 1002, "(1)"
+       line 437, ".input.spin", state 1002, "(1)"
+       line 441, ".input.spin", state 1010, "(1)"
+       line 441, ".input.spin", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 441, ".input.spin", state 1011, "else"
+       line 441, ".input.spin", state 1014, "(1)"
+       line 441, ".input.spin", state 1015, "(1)"
+       line 441, ".input.spin", state 1015, "(1)"
+       line 439, ".input.spin", state 1020, "((i<2))"
+       line 439, ".input.spin", state 1020, "((i>=2))"
+       line 449, ".input.spin", state 1024, "(1)"
+       line 449, ".input.spin", state 1024, "(1)"
+       line 604, ".input.spin", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 410, ".input.spin", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1098, "(1)"
+       line 432, ".input.spin", state 1111, "(1)"
+       line 437, ".input.spin", state 1128, "(1)"
+       line 441, ".input.spin", state 1141, "(1)"
+       line 410, ".input.spin", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1230, "(1)"
+       line 437, ".input.spin", state 1260, "(1)"
+       line 441, ".input.spin", state 1273, "(1)"
+       line 410, ".input.spin", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1363, "(1)"
+       line 437, ".input.spin", state 1393, "(1)"
+       line 441, ".input.spin", state 1406, "(1)"
+       line 410, ".input.spin", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1492, "(1)"
+       line 437, ".input.spin", state 1522, "(1)"
+       line 441, ".input.spin", state 1535, "(1)"
+       line 271, ".input.spin", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1605, "(1)"
+       line 252, ".input.spin", state 1613, "(1)"
+       line 256, ".input.spin", state 1625, "(1)"
+       line 260, ".input.spin", state 1633, "(1)"
+       line 410, ".input.spin", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1716, "(1)"
+       line 432, ".input.spin", state 1729, "(1)"
+       line 437, ".input.spin", state 1746, "(1)"
+       line 441, ".input.spin", state 1759, "(1)"
+       line 410, ".input.spin", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1845, "(1)"
+       line 432, ".input.spin", state 1858, "(1)"
+       line 437, ".input.spin", state 1875, "(1)"
+       line 441, ".input.spin", state 1888, "(1)"
+       line 410, ".input.spin", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 1977, "(1)"
+       line 437, ".input.spin", state 2007, "(1)"
+       line 441, ".input.spin", state 2020, "(1)"
+       line 643, ".input.spin", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 410, ".input.spin", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 2113, "(1)"
+       line 437, ".input.spin", state 2143, "(1)"
+       line 441, ".input.spin", state 2156, "(1)"
+       line 410, ".input.spin", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 2242, "(1)"
+       line 437, ".input.spin", state 2272, "(1)"
+       line 441, ".input.spin", state 2285, "(1)"
+       line 410, ".input.spin", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 2310, "(1)"
+       line 410, ".input.spin", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, ".input.spin", state 2311, "else"
+       line 410, ".input.spin", state 2314, "(1)"
+       line 414, ".input.spin", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2324, "(1)"
+       line 414, ".input.spin", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, ".input.spin", state 2325, "else"
+       line 414, ".input.spin", state 2328, "(1)"
+       line 414, ".input.spin", state 2329, "(1)"
+       line 414, ".input.spin", state 2329, "(1)"
+       line 412, ".input.spin", state 2334, "((i<1))"
+       line 412, ".input.spin", state 2334, "((i>=1))"
+       line 419, ".input.spin", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2342, "(1)"
+       line 419, ".input.spin", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, ".input.spin", state 2343, "else"
+       line 419, ".input.spin", state 2346, "(1)"
+       line 419, ".input.spin", state 2347, "(1)"
+       line 419, ".input.spin", state 2347, "(1)"
+       line 423, ".input.spin", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2356, "(1)"
+       line 423, ".input.spin", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, ".input.spin", state 2357, "else"
+       line 423, ".input.spin", state 2360, "(1)"
+       line 423, ".input.spin", state 2361, "(1)"
+       line 423, ".input.spin", state 2361, "(1)"
+       line 421, ".input.spin", state 2366, "((i<2))"
+       line 421, ".input.spin", state 2366, "((i>=2))"
+       line 428, ".input.spin", state 2373, "(1)"
+       line 428, ".input.spin", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 428, ".input.spin", state 2374, "else"
+       line 428, ".input.spin", state 2377, "(1)"
+       line 428, ".input.spin", state 2378, "(1)"
+       line 428, ".input.spin", state 2378, "(1)"
+       line 432, ".input.spin", state 2386, "(1)"
+       line 432, ".input.spin", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 432, ".input.spin", state 2387, "else"
+       line 432, ".input.spin", state 2390, "(1)"
+       line 432, ".input.spin", state 2391, "(1)"
+       line 432, ".input.spin", state 2391, "(1)"
+       line 430, ".input.spin", state 2396, "((i<1))"
+       line 430, ".input.spin", state 2396, "((i>=1))"
+       line 437, ".input.spin", state 2403, "(1)"
+       line 437, ".input.spin", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 437, ".input.spin", state 2404, "else"
+       line 437, ".input.spin", state 2407, "(1)"
+       line 437, ".input.spin", state 2408, "(1)"
+       line 437, ".input.spin", state 2408, "(1)"
+       line 441, ".input.spin", state 2416, "(1)"
+       line 441, ".input.spin", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 441, ".input.spin", state 2417, "else"
+       line 441, ".input.spin", state 2420, "(1)"
+       line 441, ".input.spin", state 2421, "(1)"
+       line 441, ".input.spin", state 2421, "(1)"
+       line 439, ".input.spin", state 2426, "((i<2))"
+       line 439, ".input.spin", state 2426, "((i>=2))"
+       line 449, ".input.spin", state 2430, "(1)"
+       line 449, ".input.spin", state 2430, "(1)"
+       line 643, ".input.spin", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 643, ".input.spin", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 643, ".input.spin", state 2435, "(1)"
+       line 271, ".input.spin", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2486, "(1)"
+       line 252, ".input.spin", state 2494, "(1)"
+       line 256, ".input.spin", state 2506, "(1)"
+       line 260, ".input.spin", state 2514, "(1)"
+       line 410, ".input.spin", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 2597, "(1)"
+       line 432, ".input.spin", state 2610, "(1)"
+       line 437, ".input.spin", state 2627, "(1)"
+       line 441, ".input.spin", state 2640, "(1)"
+       line 271, ".input.spin", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2711, "(1)"
+       line 252, ".input.spin", state 2719, "(1)"
+       line 256, ".input.spin", state 2731, "(1)"
+       line 260, ".input.spin", state 2739, "(1)"
+       line 410, ".input.spin", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 2822, "(1)"
+       line 432, ".input.spin", state 2835, "(1)"
+       line 437, ".input.spin", state 2852, "(1)"
+       line 441, ".input.spin", state 2865, "(1)"
+       line 410, ".input.spin", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 2951, "(1)"
+       line 432, ".input.spin", state 2964, "(1)"
+       line 437, ".input.spin", state 2981, "(1)"
+       line 441, ".input.spin", state 2994, "(1)"
+       line 248, ".input.spin", state 3027, "(1)"
+       line 256, ".input.spin", state 3047, "(1)"
+       line 260, ".input.spin", state 3055, "(1)"
+       line 248, ".input.spin", state 3070, "(1)"
+       line 252, ".input.spin", state 3078, "(1)"
+       line 256, ".input.spin", state 3090, "(1)"
+       line 260, ".input.spin", state 3098, "(1)"
+       line 897, ".input.spin", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 410, ".input.spin", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 83, "(1)"
+       line 432, ".input.spin", state 96, "(1)"
+       line 437, ".input.spin", state 113, "(1)"
+       line 271, ".input.spin", state 149, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 158, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 211, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 225, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 243, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 257, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, ".input.spin", state 276, "(1)"
+       line 432, ".input.spin", state 289, "(1)"
+       line 437, ".input.spin", state 306, "(1)"
+       line 441, ".input.spin", state 319, "(1)"
+       line 414, ".input.spin", state 356, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 374, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 388, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, ".input.spin", state 420, "(1)"
+       line 437, ".input.spin", state 437, "(1)"
+       line 441, ".input.spin", state 450, "(1)"
+       line 414, ".input.spin", state 495, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 513, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, ".input.spin", state 559, "(1)"
+       line 437, ".input.spin", state 576, "(1)"
+       line 441, ".input.spin", state 589, "(1)"
+       line 414, ".input.spin", state 624, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 642, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 656, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, ".input.spin", state 688, "(1)"
+       line 437, ".input.spin", state 705, "(1)"
+       line 441, ".input.spin", state 718, "(1)"
+       line 414, ".input.spin", state 755, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 773, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 787, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, ".input.spin", state 819, "(1)"
+       line 437, ".input.spin", state 836, "(1)"
+       line 441, ".input.spin", state 849, "(1)"
+       line 271, ".input.spin", state 904, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 913, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 928, "(1)"
+       line 283, ".input.spin", state 935, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 951, "(1)"
+       line 252, ".input.spin", state 959, "(1)"
+       line 256, ".input.spin", state 971, "(1)"
+       line 260, ".input.spin", state 979, "(1)"
+       line 275, ".input.spin", state 1004, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1017, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1026, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1042, "(1)"
+       line 252, ".input.spin", state 1050, "(1)"
+       line 256, ".input.spin", state 1062, "(1)"
+       line 260, ".input.spin", state 1070, "(1)"
+       line 275, ".input.spin", state 1095, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1108, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1117, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1133, "(1)"
+       line 252, ".input.spin", state 1141, "(1)"
+       line 256, ".input.spin", state 1153, "(1)"
+       line 260, ".input.spin", state 1161, "(1)"
+       line 275, ".input.spin", state 1186, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1199, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1208, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1224, "(1)"
+       line 252, ".input.spin", state 1232, "(1)"
+       line 256, ".input.spin", state 1244, "(1)"
+       line 260, ".input.spin", state 1252, "(1)"
+       line 1236, ".input.spin", state 1267, "-end-"
+       (71 of 1267 states)
+unreached in proctype :init:
+       (0 of 78 states)
+
+pan: elapsed time 4.59e+03 seconds
+pan: rate 1460.6376 states/second
+pan: avg transition delay 1.0334e-06 usec
+cp .input.spin asserts.spin.input
+cp .input.spin.trail asserts.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/asserts.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/asserts.spin.input
new file mode 100644 (file)
index 0000000..b59aa77
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
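+/*
+ * Sketch (hypothetical tokens, not part of the verified model) of how the
+ * macros above encode a data flow: step B below may only fire once step A
+ * has produced its token, and each step inhibits itself by listing its own
+ * token in the "notbits" argument.
+ *
+ *     #define TOK_A   (1 << 0)
+ *     #define TOK_B   (1 << 1)
+ *     int flow = 0;
+ *     byte x = 0, y = 0;
+ *
+ *     do
+ *     :: CONSUME_TOKENS(flow, 0, TOK_A) ->          // A: no input tokens
+ *             x = 1;
+ *             PRODUCE_TOKENS(flow, TOK_A);
+ *     :: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->      // B: RAW dependency on A
+ *             y = x;
+ *             PRODUCE_TOKENS(flow, TOK_B);
+ *     :: CONSUME_TOKENS(flow, TOK_A | TOK_B, 0) -> break
+ *     od;
+ */
+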
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
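+/*
+ * Tiny illustration of the data-dependency kinds above (hypothetical
+ * statements, not part of the model):
+ *
+ *     a = x;          // S1
+ *     y = a + 1;      // S2: RAW on a (S2 reads what S1 wrote)
+ *     a = 0;          // S3: WAR on a w.r.t. S2, WAW on a w.r.t. S1
+ */
+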
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
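+/*
+ * Sketch (hypothetical variable "foo", not part of the verified model) of the
+ * cache protocol above: a store becomes visible to another process only after
+ * the writer has flushed it to memory and the reader has refreshed its cache.
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *
+ *     WRITE_CACHED_VAR(foo, 1);               // writer: cache write, dirty bit set
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     // writer: flush to mem_foo
+ *     ...
+ *     CACHE_READ_FROM_MEM(foo, get_pid());    // reader: refresh from mem_foo
+ *     assert(READ_CACHED_VAR(foo) == 1);      // reader: now observes the store
+ *
+ * smp_wmb()/smp_rmb() below force this flush/refresh for all of the model's
+ * cached variables; ooo_mem() performs it at random to model reordering.
+ */
+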
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
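+
+/*
+ * Worked example of the bit layout above : with READ_LOCK_BASE == 1, the
+ * four PROCEDURE_READ_LOCK body tokens (READ_PROD_A_READ through
+ * READ_PROD_C_IF_TRUE_READ) are shifted to occupy bits 1 to 4 and
+ * READ_LOCK_OUT (1 << 5) marks that lock's completion. The nested lock
+ * re-uses the same body tokens at READ_LOCK_NESTED_BASE == 7 (bits 7 to
+ * 10), completed by READ_LOCK_NESTED_OUT (1 << 11), and so on up to
+ * READ_UNLOCK_OUT_UNROLL (1 << 29), which is why
+ * READ_PROC_ALL_TOKENS_CLEAR is ((1 << 30) - 1).
+ */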
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute when the
+                * execution order matches program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
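+
+/*
+ * Note that WRITE_PROC_ALL_TOKENS leaves out the two busy-wait branch
+ * tokens (WRITE_PROC_FIRST_WAIT_LOOP and WRITE_PROC_SECOND_WAIT_LOOP),
+ * mirroring the reader-side "should not include branches" rule, while
+ * WRITE_PROC_ALL_TOKENS_CLEAR, ((1 << 15) - 1), clears every bit up to
+ * WRITE_FREE (1 << 14), branches included.
+ */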
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we do not add non-existent dependencies on the global
+                                * GP update. Needed to test the single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given the validation
+                * checks that the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/references.txt b/formal-model/urcu-controldataflow-alpha-no-ipi/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu.sh b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
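+# "spin -a" generates the pan.c verifier source from the model, and
+# -DSAFETY compiles it for safety-only verification (no liveness/LTL
+# checking). pan is then run with a maximum search depth of 10000000
+# (-m), a 2^21-slot hash table (-w21), and stops at the first error
+# found (-c1).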
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w21
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu.spin b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu.spin
new file mode 100644 (file)
index 0000000..54752a1
--- /dev/null
@@ -0,0 +1,1254 @@
+/*
+ * urcu.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
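+
+/*
+ * Typical usage pattern (illustrative sketch only, mirroring the reader
+ * and writer bodies below) : an instruction is guarded by CONSUME_TOKENS,
+ * whose "bits" argument lists the tokens it depends on and whose
+ * "notbits" argument includes its own output token so it executes at
+ * most once per pass :
+ *
+ *     :: CONSUME_TOKENS(proc_state, TOKEN_A | TOKEN_B, TOKEN_C) ->
+ *             ...instruction body...
+ *             PRODUCE_TOKENS(proc_state, TOKEN_C);
+ *
+ * where proc_state and the TOKEN_* names are placeholders, not part of
+ * the model.
+ */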
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
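+
+/*
+ * Small illustrative example of the categories above (sketch) : in the
+ * reader's lock procedure below,
+ *
+ *     tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+ *     WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1);
+ *
+ * the write has a RAW (true) dependency on the read through "tmp", and
+ * the read has a WAR dependency on the write because both access the
+ * same OOO memory model variable, so renaming cannot remove it.
+ */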
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core-synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
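+
+/*
+ * Usage sketch (illustrative only, "foo" is a placeholder name) :
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *     WRITE_CACHED_VAR(foo, 1);               -> updates cached_foo for this
+ *                                                process and marks it dirty
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     -> flushes the dirty value to
+ *                                                mem_foo
+ *     CACHE_READ_FROM_MEM(foo, get_pid());    -> refreshes the local cache
+ *                                                from mem_foo when clean
+ *
+ * smp_wmb()/smp_rmb() below force these flushes/refreshes for every
+ * modeled variable, while ooo_mem() performs them at random.
+ */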
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * while waiting for the reader and sending barrier requests, with
+                * the reader always servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
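+/*
+ * PROCEDURE_READ_LOCK models rcu_read_lock(): read the per-reader count,
+ * then either snapshot the global grace period counter into
+ * urcu_active_readers (outermost critical section) or simply increment the
+ * nesting count (nested case). Both branches merge before "producetoken"
+ * is produced.
+ */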
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
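+/*
+ * PROCEDURE_READ_UNLOCK models rcu_read_unlock(): read the per-reader
+ * count, then write it back decremented by one.
+ */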
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
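+
+/*
+ * The reader body below is expressed as a dataflow graph rather than a
+ * sequential program: each instruction is guarded by CONSUME_TOKENS(),
+ * which requires the tokens of the instructions it depends on to have been
+ * produced (and its own tokens not to have been produced yet), and ends
+ * with PRODUCE_TOKENS(), which marks the instruction as executed. The
+ * verifier can thus explore every instruction scheduling permitted by the
+ * declared dependencies, modelling weakly-ordered execution. CLEAR_TOKENS()
+ * resets the token word before the next iteration of the read loop.
+ */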
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
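+
+       /*
+        * Producing the memory barrier tokens up front removes the ordering
+        * constraints those barriers would otherwise impose on the dataflow:
+        * either because the barriers are intentionally absent (NO_MB), or
+        * because they are executed out of line upon the writer's request
+        * (REMOTE_BARRIERS, handled by smp_mb_recv() below).
+        */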
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute when the
+                * execution order appears in program order: each alternative
+                * of the guard below corresponds to one program-order prefix
+                * of the reader's instructions, so the barrier request is only
+                * serviced between such prefixes.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution onto the other.
+        */
+       goto end;
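+       /* Non-atomic parts of the loop */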
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
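+
+/*
+ * The writer tokens above follow the update-side sequence: write the new
+ * data, wmb, publish the new pointer with an atomic exchange, full mb,
+ * flip the grace period parity and wait for the reader, flip and wait a
+ * second time, full mb, then poison (free) the old data.
+ */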
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
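+                       /*
+                        * Keep waiting (WRITE_PROC_FIRST_WAIT_LOOP) while
+                        * reader 0 is within a read-side critical section whose
+                        * snapshot parity differs from cur_gp_val, i.e. a
+                        * critical section that started before the parity flip.
+                        */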
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep init after the readers and writers so the pid count stays correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.log
new file mode 100644 (file)
index 0000000..0802f77
--- /dev/null
@@ -0,0 +1,479 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1294)
+Depth=    5182 States=    1e+06 Transitions= 6.22e+08 Memory=   550.432        t=    762 R=   1e+03
+Depth=    5473 States=    2e+06 Transitions=  1.3e+09 Memory=   634.318        t= 1.62e+03 R=   1e+03
+Depth=    5473 States=    3e+06 Transitions= 1.95e+09 Memory=   718.303        t= 2.47e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=    5473 States=    4e+06 Transitions= 2.64e+09 Memory=   833.311        t= 3.32e+03 R=   1e+03
+Depth=    5473 States=    5e+06 Transitions=  3.3e+09 Memory=   917.295        t= 4.14e+03 R=   1e+03
+Depth=    5582 States=    6e+06 Transitions= 3.99e+09 Memory=  1001.279        t=  5e+03 R=   1e+03
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5582, errors: 0
+  6711104 states, stored
+4.4393362e+09 states, matched
+4.4460473e+09 transitions (= stored+matched)
+2.5322962e+10 atomic steps
+hash conflicts: 3.3328749e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+  742.424      equivalent memory usage for states (stored*(State-vector + overhead))
+  571.575      actual memory usage for states (compression: 76.99%)
+               state-vector as stored = 61 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+ 1060.947      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 410, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 82, "(1)"
+       line 437, "pan.___", state 112, "(1)"
+       line 441, "pan.___", state 125, "(1)"
+       line 596, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 410, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 218, "(1)"
+       line 437, "pan.___", state 248, "(1)"
+       line 441, "pan.___", state 261, "(1)"
+       line 410, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 347, "(1)"
+       line 437, "pan.___", state 377, "(1)"
+       line 441, "pan.___", state 390, "(1)"
+       line 410, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 415, "(1)"
+       line 410, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 416, "else"
+       line 410, "pan.___", state 419, "(1)"
+       line 414, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 429, "(1)"
+       line 414, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 430, "else"
+       line 414, "pan.___", state 433, "(1)"
+       line 414, "pan.___", state 434, "(1)"
+       line 414, "pan.___", state 434, "(1)"
+       line 412, "pan.___", state 439, "((i<1))"
+       line 412, "pan.___", state 439, "((i>=1))"
+       line 419, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 447, "(1)"
+       line 419, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 448, "else"
+       line 419, "pan.___", state 451, "(1)"
+       line 419, "pan.___", state 452, "(1)"
+       line 419, "pan.___", state 452, "(1)"
+       line 423, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 461, "(1)"
+       line 423, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 462, "else"
+       line 423, "pan.___", state 465, "(1)"
+       line 423, "pan.___", state 466, "(1)"
+       line 423, "pan.___", state 466, "(1)"
+       line 421, "pan.___", state 471, "((i<2))"
+       line 421, "pan.___", state 471, "((i>=2))"
+       line 428, "pan.___", state 478, "(1)"
+       line 428, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 428, "pan.___", state 479, "else"
+       line 428, "pan.___", state 482, "(1)"
+       line 428, "pan.___", state 483, "(1)"
+       line 428, "pan.___", state 483, "(1)"
+       line 432, "pan.___", state 491, "(1)"
+       line 432, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 432, "pan.___", state 492, "else"
+       line 432, "pan.___", state 495, "(1)"
+       line 432, "pan.___", state 496, "(1)"
+       line 432, "pan.___", state 496, "(1)"
+       line 430, "pan.___", state 501, "((i<1))"
+       line 430, "pan.___", state 501, "((i>=1))"
+       line 437, "pan.___", state 508, "(1)"
+       line 437, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 437, "pan.___", state 509, "else"
+       line 437, "pan.___", state 512, "(1)"
+       line 437, "pan.___", state 513, "(1)"
+       line 437, "pan.___", state 513, "(1)"
+       line 441, "pan.___", state 521, "(1)"
+       line 441, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 441, "pan.___", state 522, "else"
+       line 441, "pan.___", state 525, "(1)"
+       line 441, "pan.___", state 526, "(1)"
+       line 441, "pan.___", state 526, "(1)"
+       line 439, "pan.___", state 531, "((i<2))"
+       line 439, "pan.___", state 531, "((i>=2))"
+       line 449, "pan.___", state 535, "(1)"
+       line 449, "pan.___", state 535, "(1)"
+       line 596, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 596, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 596, "pan.___", state 540, "(1)"
+       line 271, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 555, "(1)"
+       line 279, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 591, "(1)"
+       line 252, "pan.___", state 599, "(1)"
+       line 256, "pan.___", state 611, "(1)"
+       line 260, "pan.___", state 619, "(1)"
+       line 410, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 702, "(1)"
+       line 432, "pan.___", state 715, "(1)"
+       line 437, "pan.___", state 732, "(1)"
+       line 441, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 838, "(1)"
+       line 437, "pan.___", state 868, "(1)"
+       line 441, "pan.___", state 881, "(1)"
+       line 410, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 904, "(1)"
+       line 410, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 905, "else"
+       line 410, "pan.___", state 908, "(1)"
+       line 414, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 918, "(1)"
+       line 414, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 919, "else"
+       line 414, "pan.___", state 922, "(1)"
+       line 414, "pan.___", state 923, "(1)"
+       line 414, "pan.___", state 923, "(1)"
+       line 412, "pan.___", state 928, "((i<1))"
+       line 412, "pan.___", state 928, "((i>=1))"
+       line 419, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 936, "(1)"
+       line 419, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 937, "else"
+       line 419, "pan.___", state 940, "(1)"
+       line 419, "pan.___", state 941, "(1)"
+       line 419, "pan.___", state 941, "(1)"
+       line 423, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 950, "(1)"
+       line 423, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 951, "else"
+       line 423, "pan.___", state 954, "(1)"
+       line 423, "pan.___", state 955, "(1)"
+       line 423, "pan.___", state 955, "(1)"
+       line 421, "pan.___", state 960, "((i<2))"
+       line 421, "pan.___", state 960, "((i>=2))"
+       line 428, "pan.___", state 967, "(1)"
+       line 428, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 428, "pan.___", state 968, "else"
+       line 428, "pan.___", state 971, "(1)"
+       line 428, "pan.___", state 972, "(1)"
+       line 428, "pan.___", state 972, "(1)"
+       line 432, "pan.___", state 980, "(1)"
+       line 432, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 432, "pan.___", state 981, "else"
+       line 432, "pan.___", state 984, "(1)"
+       line 432, "pan.___", state 985, "(1)"
+       line 432, "pan.___", state 985, "(1)"
+       line 430, "pan.___", state 990, "((i<1))"
+       line 430, "pan.___", state 990, "((i>=1))"
+       line 437, "pan.___", state 997, "(1)"
+       line 437, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 437, "pan.___", state 998, "else"
+       line 437, "pan.___", state 1001, "(1)"
+       line 437, "pan.___", state 1002, "(1)"
+       line 437, "pan.___", state 1002, "(1)"
+       line 441, "pan.___", state 1010, "(1)"
+       line 441, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 441, "pan.___", state 1011, "else"
+       line 441, "pan.___", state 1014, "(1)"
+       line 441, "pan.___", state 1015, "(1)"
+       line 441, "pan.___", state 1015, "(1)"
+       line 439, "pan.___", state 1020, "((i<2))"
+       line 439, "pan.___", state 1020, "((i>=2))"
+       line 449, "pan.___", state 1024, "(1)"
+       line 449, "pan.___", state 1024, "(1)"
+       line 604, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 410, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1098, "(1)"
+       line 432, "pan.___", state 1111, "(1)"
+       line 437, "pan.___", state 1128, "(1)"
+       line 441, "pan.___", state 1141, "(1)"
+       line 410, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1230, "(1)"
+       line 437, "pan.___", state 1260, "(1)"
+       line 441, "pan.___", state 1273, "(1)"
+       line 410, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1363, "(1)"
+       line 437, "pan.___", state 1393, "(1)"
+       line 441, "pan.___", state 1406, "(1)"
+       line 410, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1492, "(1)"
+       line 437, "pan.___", state 1522, "(1)"
+       line 441, "pan.___", state 1535, "(1)"
+       line 271, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1605, "(1)"
+       line 252, "pan.___", state 1613, "(1)"
+       line 256, "pan.___", state 1625, "(1)"
+       line 260, "pan.___", state 1633, "(1)"
+       line 410, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1716, "(1)"
+       line 432, "pan.___", state 1729, "(1)"
+       line 437, "pan.___", state 1746, "(1)"
+       line 441, "pan.___", state 1759, "(1)"
+       line 410, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1845, "(1)"
+       line 432, "pan.___", state 1858, "(1)"
+       line 437, "pan.___", state 1875, "(1)"
+       line 441, "pan.___", state 1888, "(1)"
+       line 410, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 1977, "(1)"
+       line 437, "pan.___", state 2007, "(1)"
+       line 441, "pan.___", state 2020, "(1)"
+       line 643, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 410, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 2113, "(1)"
+       line 437, "pan.___", state 2143, "(1)"
+       line 441, "pan.___", state 2156, "(1)"
+       line 410, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 2242, "(1)"
+       line 437, "pan.___", state 2272, "(1)"
+       line 441, "pan.___", state 2285, "(1)"
+       line 410, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2310, "(1)"
+       line 410, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 2311, "else"
+       line 410, "pan.___", state 2314, "(1)"
+       line 414, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2324, "(1)"
+       line 414, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2325, "else"
+       line 414, "pan.___", state 2328, "(1)"
+       line 414, "pan.___", state 2329, "(1)"
+       line 414, "pan.___", state 2329, "(1)"
+       line 412, "pan.___", state 2334, "((i<1))"
+       line 412, "pan.___", state 2334, "((i>=1))"
+       line 419, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2342, "(1)"
+       line 419, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 2343, "else"
+       line 419, "pan.___", state 2346, "(1)"
+       line 419, "pan.___", state 2347, "(1)"
+       line 419, "pan.___", state 2347, "(1)"
+       line 423, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2356, "(1)"
+       line 423, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 2357, "else"
+       line 423, "pan.___", state 2360, "(1)"
+       line 423, "pan.___", state 2361, "(1)"
+       line 423, "pan.___", state 2361, "(1)"
+       line 421, "pan.___", state 2366, "((i<2))"
+       line 421, "pan.___", state 2366, "((i>=2))"
+       line 428, "pan.___", state 2373, "(1)"
+       line 428, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 428, "pan.___", state 2374, "else"
+       line 428, "pan.___", state 2377, "(1)"
+       line 428, "pan.___", state 2378, "(1)"
+       line 428, "pan.___", state 2378, "(1)"
+       line 432, "pan.___", state 2386, "(1)"
+       line 432, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 432, "pan.___", state 2387, "else"
+       line 432, "pan.___", state 2390, "(1)"
+       line 432, "pan.___", state 2391, "(1)"
+       line 432, "pan.___", state 2391, "(1)"
+       line 430, "pan.___", state 2396, "((i<1))"
+       line 430, "pan.___", state 2396, "((i>=1))"
+       line 437, "pan.___", state 2403, "(1)"
+       line 437, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 437, "pan.___", state 2404, "else"
+       line 437, "pan.___", state 2407, "(1)"
+       line 437, "pan.___", state 2408, "(1)"
+       line 437, "pan.___", state 2408, "(1)"
+       line 441, "pan.___", state 2416, "(1)"
+       line 441, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 441, "pan.___", state 2417, "else"
+       line 441, "pan.___", state 2420, "(1)"
+       line 441, "pan.___", state 2421, "(1)"
+       line 441, "pan.___", state 2421, "(1)"
+       line 439, "pan.___", state 2426, "((i<2))"
+       line 439, "pan.___", state 2426, "((i>=2))"
+       line 449, "pan.___", state 2430, "(1)"
+       line 449, "pan.___", state 2430, "(1)"
+       line 643, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 643, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 643, "pan.___", state 2435, "(1)"
+       line 271, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2486, "(1)"
+       line 252, "pan.___", state 2494, "(1)"
+       line 256, "pan.___", state 2506, "(1)"
+       line 260, "pan.___", state 2514, "(1)"
+       line 410, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 2597, "(1)"
+       line 432, "pan.___", state 2610, "(1)"
+       line 437, "pan.___", state 2627, "(1)"
+       line 441, "pan.___", state 2640, "(1)"
+       line 271, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2711, "(1)"
+       line 252, "pan.___", state 2719, "(1)"
+       line 256, "pan.___", state 2731, "(1)"
+       line 260, "pan.___", state 2739, "(1)"
+       line 410, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 2822, "(1)"
+       line 432, "pan.___", state 2835, "(1)"
+       line 437, "pan.___", state 2852, "(1)"
+       line 441, "pan.___", state 2865, "(1)"
+       line 410, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 2951, "(1)"
+       line 432, "pan.___", state 2964, "(1)"
+       line 437, "pan.___", state 2981, "(1)"
+       line 441, "pan.___", state 2994, "(1)"
+       line 248, "pan.___", state 3027, "(1)"
+       line 256, "pan.___", state 3047, "(1)"
+       line 260, "pan.___", state 3055, "(1)"
+       line 248, "pan.___", state 3070, "(1)"
+       line 252, "pan.___", state 3078, "(1)"
+       line 256, "pan.___", state 3090, "(1)"
+       line 260, "pan.___", state 3098, "(1)"
+       line 897, "pan.___", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 410, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 83, "(1)"
+       line 432, "pan.___", state 96, "(1)"
+       line 437, "pan.___", state 113, "(1)"
+       line 271, "pan.___", state 149, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 158, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 211, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 225, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 243, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 257, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 428, "pan.___", state 276, "(1)"
+       line 432, "pan.___", state 289, "(1)"
+       line 437, "pan.___", state 306, "(1)"
+       line 441, "pan.___", state 319, "(1)"
+       line 414, "pan.___", state 356, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 374, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 388, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, "pan.___", state 420, "(1)"
+       line 437, "pan.___", state 437, "(1)"
+       line 441, "pan.___", state 450, "(1)"
+       line 414, "pan.___", state 495, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 513, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, "pan.___", state 559, "(1)"
+       line 437, "pan.___", state 576, "(1)"
+       line 441, "pan.___", state 589, "(1)"
+       line 414, "pan.___", state 624, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 642, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 656, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, "pan.___", state 688, "(1)"
+       line 437, "pan.___", state 705, "(1)"
+       line 441, "pan.___", state 718, "(1)"
+       line 414, "pan.___", state 755, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 773, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 787, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 432, "pan.___", state 819, "(1)"
+       line 437, "pan.___", state 836, "(1)"
+       line 441, "pan.___", state 849, "(1)"
+       line 271, "pan.___", state 904, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 913, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 928, "(1)"
+       line 283, "pan.___", state 935, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 951, "(1)"
+       line 252, "pan.___", state 959, "(1)"
+       line 256, "pan.___", state 971, "(1)"
+       line 260, "pan.___", state 979, "(1)"
+       line 275, "pan.___", state 1004, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1017, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1026, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1042, "(1)"
+       line 252, "pan.___", state 1050, "(1)"
+       line 256, "pan.___", state 1062, "(1)"
+       line 260, "pan.___", state 1070, "(1)"
+       line 275, "pan.___", state 1095, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1108, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1117, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1133, "(1)"
+       line 252, "pan.___", state 1141, "(1)"
+       line 256, "pan.___", state 1153, "(1)"
+       line 260, "pan.___", state 1161, "(1)"
+       line 275, "pan.___", state 1186, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1199, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1208, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1224, "(1)"
+       line 252, "pan.___", state 1232, "(1)"
+       line 256, "pan.___", state 1244, "(1)"
+       line 260, "pan.___", state 1252, "(1)"
+       line 1236, "pan.___", state 1267, "-end-"
+       (71 of 1267 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1299, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 5.57e+03 seconds
+pan: rate 1203.9777 states/second
+pan: avg transition delay 1.2537e-06 usec
+cp .input.spin urcu_free.spin.input
+cp .input.spin.trail urcu_free.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.ltl b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free.spin.input
new file mode 100644 (file)
index 0000000..b59aa77
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
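+
+/*
+ * Illustrative sketch only (hypothetical tokens STEP_A/STEP_B and state
+ * variable "flow", not part of the model): a two-step sequence using the
+ * token macros above, where step B may only execute once step A has
+ * produced its token:
+ *
+ *     :: CONSUME_TOKENS(flow, 0, STEP_A) ->
+ *             ...execute step A...
+ *             PRODUCE_TOKENS(flow, STEP_A);
+ *     :: CONSUME_TOKENS(flow, STEP_A, STEP_B) ->
+ *             ...execute step B...
+ *             PRODUCE_TOKENS(flow, STEP_B);
+ */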
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * normally removes this dependency, but it remains when multiple writes must
+ * target the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g., branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
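+
+/*
+ * Illustrative sketch only (hypothetical statements S1/S2 and variable x,
+ * not part of the model): the three data dependency kinds described above.
+ *
+ *     S1: x = a;  S2: b = x;   RAW  (S2 reads the value S1 wrote)
+ *     S1: b = x;  S2: x = a;   WAR  (S2 overwrites the value S1 read)
+ *     S1: x = a;  S2: x = b;   WAW  (both write x; ordering matters)
+ */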
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
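+
+/*
+ * Illustrative expansion only (hypothetical variable "foo", not part of the
+ * model): DECLARE_CACHED_VAR(byte, foo) yields
+ *
+ *     byte mem_foo;                           // memory copy
+ *     per_proc_byte cached_foo;               // per-process cached value
+ *     per_proc_bitfield cache_dirty_foo;      // per-process dirty bits
+ */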
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
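+
+/*
+ * Illustrative trace only (hypothetical variable "foo", not part of the
+ * model): after process 0 does WRITE_CACHED_VAR(foo, 1), bit 0 of
+ * cache_dirty_foo is set; CACHE_WRITE_TO_MEM(foo, 0) then copies
+ * cached_foo.val[0] into mem_foo and clears the dirty bit, while
+ * CACHE_READ_FROM_MEM(foo, 0) is a no-op as long as the line is dirty.
+ */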
+
+/*
+ * Randomly propagate the cache to/from memory. Write-back only has an effect
+ * when the cache line is dirty; a refresh from memory only when it is clean.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
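+
+/*
+ * For reference only: ooo_mem() is what models the weakly ordered memory
+ * system. Inserted between instructions, it nondeterministically writes
+ * dirty cache lines back to memory and, on Alpha (HAVE_OOO_CACHE_READ),
+ * nondeterministically refreshes clean lines, so loads as well as stores
+ * may appear reordered unless a barrier forces the caches to synchronize.
+ */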
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
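+
+/*
+ * For reference only: the branches above model rcu_read_lock(): read the
+ * per-reader count, then either store the global urcu_gp_ctr snapshot
+ * (outermost lock, nest mask clear) or simply increment the count (nested
+ * lock), with ooo_mem() between steps to expose reordering.
+ */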
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
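+
+/*
+ * Summary of the bit layout above (derived from the defines, for reference
+ * only): bit 0 READ_PROD_NONE, bits 1-5 outer lock, bit 6 first mb, bits
+ * 7-11 nested lock, bits 12-13 pointer/data reads, bits 14-15 nested
+ * unlock, bit 16 second mb, bits 17-18 outer unlock, bits 19-23 unrolled
+ * lock, bit 24 third mb, bits 25-26 unrolled reads, bit 27 fourth mb,
+ * bits 28-29 unrolled unlock; hence the 30-bit clear mask.
+ */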
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop iterations.
+        * However, in the event the mb()s are removed (execution using a
+        * signal handler to promote barrier() -> smp_mb()), nothing prevents
+        * one loop iteration from spilling its execution onto the other's.
+        */
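+       /*
+        * This is presumably what the NO_MB configuration exercises (see the
+        * urcu_free_no_mb.define input further below, which is just
+        * "#define NO_MB"): with the mb()s compiled out, the corresponding
+        * urcu_free_no_mb.log run ends with "claim violated".
+        */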
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could believe the writer is making
+                * progress while it is in fact blocked by an always-progressing
+                * reader.
+                */
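+               /*
+                * One verification run is therefore done per progress
+                * property, selecting either READER_PROGRESS or
+                * WRITER_PROGRESS at preprocessing time (presumably through
+                * the *.define files copied into .input.define, as the logs
+                * below do for the other configurations).
+                */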
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
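+
+/*
+ * Summary of how these token bits are used by the writer body below (the
+ * CONSUME_TOKENS/PRODUCE_TOKENS/CLEAR_TOKENS helpers are defined earlier in
+ * this model): an instruction is guarded by
+ * CONSUME_TOKENS(proc, dependencies, produced) and may only fire once all of
+ * its dependency tokens have been produced and its own token has not been
+ * produced yet; it then ends with PRODUCE_TOKENS() for its own token.
+ * Instructions whose dependencies are already satisfied may interleave
+ * freely, which is how out-of-order execution of the writer is modelled.
+ */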
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
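+               /*
+                * Producing the second-flip tokens up front disables the
+                * corresponding instructions below (their "produced" bit is
+                * already set), so with SINGLE_FLIP the writer performs only
+                * one parity flip per iteration and cur_gp_val has to track
+                * the parity locally instead of relying on the global value.
+                */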
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+                       /* The memory barrier invalidates the second read if it
+                        * was performed early, as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
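+                       /*
+                        * Clearing WRITE_PROC_SECOND_READ_GP above re-enables
+                        * the instruction that reads urcu_gp_ctr for the second
+                        * flip, modelling the fact that the barrier discards a
+                        * value prefetched before this wait loop completed.
+                        */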
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill over into the next loop execution. Given that the
+                * validation checks whether the data entry that was read is
+                * poisoned, it is OK not to check for "late arriving" memory
+                * poisoning.
+                */
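+               /*
+                * For illustration only (the actual property is carried by
+                * the LTL/assert inputs fed to spin, e.g. urcu_free.ltl in
+                * the logs below), the check mentioned above amounts to
+                * something like:
+                *   assert(data_read_first[0] != POISON &&
+                *          data_read_second[0] != POISON);
+                */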
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can
+        * still test the writer's progress.
+        */
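+       /*
+        * Reminder (an assumption about how the progress logs are produced;
+        * the corresponding Makefile rules are not shown here): Spin only
+        * checks the progress_* labels for non-progress cycles when pan.c is
+        * compiled with -DNP and pan is run with -l, e.g.:
+        *   gcc -DNP -o pan pan.c && ./pan -l -f
+        */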
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave the init process after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_nested.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.log
new file mode 100644 (file)
index 0000000..5ccd769
--- /dev/null
@@ -0,0 +1,841 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_mb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    5985 States=    1e+06 Transitions= 3.71e+08 Memory=   550.432        t=    439 R=   2e+03
+Depth=    8926 States=    2e+06 Transitions= 9.51e+08 Memory=   634.416        t= 1.15e+03 R=   2e+03
+Depth=    8926 States=    3e+06 Transitions= 1.54e+09 Memory=   718.303        t= 1.89e+03 R=   2e+03
+pan: resizing hashtable to -w22..  done
+Depth=    8926 States=    4e+06 Transitions= 2.08e+09 Memory=   833.311        t= 2.56e+03 R=   2e+03
+Depth=    8926 States=    5e+06 Transitions= 2.46e+09 Memory=   917.295        t= 3.01e+03 R=   2e+03
+Depth=    8926 States=    6e+06 Transitions= 2.91e+09 Memory=  1001.279        t= 3.56e+03 R=   2e+03
+pan: claim violated! (at depth 1267)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 8926, errors: 1
+  6822337 states, stored
+3.2012922e+09 states, matched
+3.2081145e+09 transitions (= stored+matched)
+1.7190132e+10 atomic steps
+hash conflicts: 2.341747e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+  754.729      equivalent memory usage for states (stored*(State-vector + overhead))
+  580.944      actual memory usage for states (compression: 76.97%)
+               state-vector as stored = 61 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+ 1070.322      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 21, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 53, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 67, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 86, "(1)"
+       line 438, "pan.___", state 116, "(1)"
+       line 442, "pan.___", state 129, "(1)"
+       line 603, "pan.___", state 150, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 157, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 189, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 203, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 222, "(1)"
+       line 438, "pan.___", state 252, "(1)"
+       line 442, "pan.___", state 265, "(1)"
+       line 411, "pan.___", state 286, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 318, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 332, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 351, "(1)"
+       line 438, "pan.___", state 381, "(1)"
+       line 442, "pan.___", state 394, "(1)"
+       line 411, "pan.___", state 417, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 419, "(1)"
+       line 411, "pan.___", state 420, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 420, "else"
+       line 411, "pan.___", state 423, "(1)"
+       line 415, "pan.___", state 431, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 433, "(1)"
+       line 415, "pan.___", state 434, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 434, "else"
+       line 415, "pan.___", state 437, "(1)"
+       line 415, "pan.___", state 438, "(1)"
+       line 415, "pan.___", state 438, "(1)"
+       line 413, "pan.___", state 443, "((i<1))"
+       line 413, "pan.___", state 443, "((i>=1))"
+       line 420, "pan.___", state 449, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 451, "(1)"
+       line 420, "pan.___", state 452, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 452, "else"
+       line 420, "pan.___", state 455, "(1)"
+       line 420, "pan.___", state 456, "(1)"
+       line 420, "pan.___", state 456, "(1)"
+       line 424, "pan.___", state 463, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 465, "(1)"
+       line 424, "pan.___", state 466, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 466, "else"
+       line 424, "pan.___", state 469, "(1)"
+       line 424, "pan.___", state 470, "(1)"
+       line 424, "pan.___", state 470, "(1)"
+       line 422, "pan.___", state 475, "((i<2))"
+       line 422, "pan.___", state 475, "((i>=2))"
+       line 429, "pan.___", state 482, "(1)"
+       line 429, "pan.___", state 483, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 483, "else"
+       line 429, "pan.___", state 486, "(1)"
+       line 429, "pan.___", state 487, "(1)"
+       line 429, "pan.___", state 487, "(1)"
+       line 433, "pan.___", state 495, "(1)"
+       line 433, "pan.___", state 496, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 496, "else"
+       line 433, "pan.___", state 499, "(1)"
+       line 433, "pan.___", state 500, "(1)"
+       line 433, "pan.___", state 500, "(1)"
+       line 431, "pan.___", state 505, "((i<1))"
+       line 431, "pan.___", state 505, "((i>=1))"
+       line 438, "pan.___", state 512, "(1)"
+       line 438, "pan.___", state 513, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 513, "else"
+       line 438, "pan.___", state 516, "(1)"
+       line 438, "pan.___", state 517, "(1)"
+       line 438, "pan.___", state 517, "(1)"
+       line 442, "pan.___", state 525, "(1)"
+       line 442, "pan.___", state 526, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 526, "else"
+       line 442, "pan.___", state 529, "(1)"
+       line 442, "pan.___", state 530, "(1)"
+       line 442, "pan.___", state 530, "(1)"
+       line 440, "pan.___", state 535, "((i<2))"
+       line 440, "pan.___", state 535, "((i>=2))"
+       line 450, "pan.___", state 539, "(1)"
+       line 450, "pan.___", state 539, "(1)"
+       line 603, "pan.___", state 542, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 603, "pan.___", state 543, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 603, "pan.___", state 544, "(1)"
+       line 272, "pan.___", state 548, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 550, "(1)"
+       line 276, "pan.___", state 557, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 559, "(1)"
+       line 276, "pan.___", state 560, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 560, "else"
+       line 274, "pan.___", state 565, "((i<1))"
+       line 274, "pan.___", state 565, "((i>=1))"
+       line 280, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 572, "(1)"
+       line 280, "pan.___", state 573, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 573, "else"
+       line 284, "pan.___", state 579, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 581, "(1)"
+       line 284, "pan.___", state 582, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 582, "else"
+       line 282, "pan.___", state 587, "((i<2))"
+       line 282, "pan.___", state 587, "((i>=2))"
+       line 249, "pan.___", state 595, "(1)"
+       line 253, "pan.___", state 603, "(1)"
+       line 253, "pan.___", state 604, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 604, "else"
+       line 251, "pan.___", state 609, "((i<1))"
+       line 251, "pan.___", state 609, "((i>=1))"
+       line 257, "pan.___", state 615, "(1)"
+       line 257, "pan.___", state 616, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 616, "else"
+       line 261, "pan.___", state 623, "(1)"
+       line 261, "pan.___", state 624, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 624, "else"
+       line 266, "pan.___", state 633, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 633, "else"
+       line 299, "pan.___", state 635, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 635, "else"
+       line 411, "pan.___", state 641, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 673, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 687, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 706, "(1)"
+       line 438, "pan.___", state 736, "(1)"
+       line 442, "pan.___", state 749, "(1)"
+       line 411, "pan.___", state 777, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 809, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 823, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 842, "(1)"
+       line 438, "pan.___", state 872, "(1)"
+       line 442, "pan.___", state 885, "(1)"
+       line 411, "pan.___", state 906, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 908, "(1)"
+       line 411, "pan.___", state 909, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 909, "else"
+       line 411, "pan.___", state 912, "(1)"
+       line 415, "pan.___", state 920, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 922, "(1)"
+       line 415, "pan.___", state 923, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 923, "else"
+       line 415, "pan.___", state 926, "(1)"
+       line 415, "pan.___", state 927, "(1)"
+       line 415, "pan.___", state 927, "(1)"
+       line 413, "pan.___", state 932, "((i<1))"
+       line 413, "pan.___", state 932, "((i>=1))"
+       line 420, "pan.___", state 938, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 940, "(1)"
+       line 420, "pan.___", state 941, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 941, "else"
+       line 420, "pan.___", state 944, "(1)"
+       line 420, "pan.___", state 945, "(1)"
+       line 420, "pan.___", state 945, "(1)"
+       line 424, "pan.___", state 952, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 954, "(1)"
+       line 424, "pan.___", state 955, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 955, "else"
+       line 424, "pan.___", state 958, "(1)"
+       line 424, "pan.___", state 959, "(1)"
+       line 424, "pan.___", state 959, "(1)"
+       line 422, "pan.___", state 964, "((i<2))"
+       line 422, "pan.___", state 964, "((i>=2))"
+       line 429, "pan.___", state 971, "(1)"
+       line 429, "pan.___", state 972, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 972, "else"
+       line 429, "pan.___", state 975, "(1)"
+       line 429, "pan.___", state 976, "(1)"
+       line 429, "pan.___", state 976, "(1)"
+       line 433, "pan.___", state 984, "(1)"
+       line 433, "pan.___", state 985, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 985, "else"
+       line 433, "pan.___", state 988, "(1)"
+       line 433, "pan.___", state 989, "(1)"
+       line 433, "pan.___", state 989, "(1)"
+       line 431, "pan.___", state 994, "((i<1))"
+       line 431, "pan.___", state 994, "((i>=1))"
+       line 438, "pan.___", state 1001, "(1)"
+       line 438, "pan.___", state 1002, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 1002, "else"
+       line 438, "pan.___", state 1005, "(1)"
+       line 438, "pan.___", state 1006, "(1)"
+       line 438, "pan.___", state 1006, "(1)"
+       line 442, "pan.___", state 1014, "(1)"
+       line 442, "pan.___", state 1015, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 1015, "else"
+       line 442, "pan.___", state 1018, "(1)"
+       line 442, "pan.___", state 1019, "(1)"
+       line 442, "pan.___", state 1019, "(1)"
+       line 440, "pan.___", state 1024, "((i<2))"
+       line 440, "pan.___", state 1024, "((i>=2))"
+       line 450, "pan.___", state 1028, "(1)"
+       line 450, "pan.___", state 1028, "(1)"
+       line 611, "pan.___", state 1032, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 1037, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1069, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1083, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1102, "(1)"
+       line 438, "pan.___", state 1132, "(1)"
+       line 442, "pan.___", state 1145, "(1)"
+       line 411, "pan.___", state 1169, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1201, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1215, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1234, "(1)"
+       line 438, "pan.___", state 1264, "(1)"
+       line 442, "pan.___", state 1277, "(1)"
+       line 411, "pan.___", state 1302, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1334, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1348, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1367, "(1)"
+       line 438, "pan.___", state 1397, "(1)"
+       line 442, "pan.___", state 1410, "(1)"
+       line 411, "pan.___", state 1431, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1463, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1477, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1496, "(1)"
+       line 438, "pan.___", state 1526, "(1)"
+       line 442, "pan.___", state 1539, "(1)"
+       line 272, "pan.___", state 1562, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1564, "(1)"
+       line 276, "pan.___", state 1571, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1573, "(1)"
+       line 276, "pan.___", state 1574, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1574, "else"
+       line 274, "pan.___", state 1579, "((i<1))"
+       line 274, "pan.___", state 1579, "((i>=1))"
+       line 280, "pan.___", state 1584, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1586, "(1)"
+       line 280, "pan.___", state 1587, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1587, "else"
+       line 284, "pan.___", state 1593, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1595, "(1)"
+       line 284, "pan.___", state 1596, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1596, "else"
+       line 282, "pan.___", state 1601, "((i<2))"
+       line 282, "pan.___", state 1601, "((i>=2))"
+       line 249, "pan.___", state 1609, "(1)"
+       line 253, "pan.___", state 1617, "(1)"
+       line 253, "pan.___", state 1618, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1618, "else"
+       line 251, "pan.___", state 1623, "((i<1))"
+       line 251, "pan.___", state 1623, "((i>=1))"
+       line 257, "pan.___", state 1629, "(1)"
+       line 257, "pan.___", state 1630, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1630, "else"
+       line 261, "pan.___", state 1637, "(1)"
+       line 261, "pan.___", state 1638, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1638, "else"
+       line 266, "pan.___", state 1647, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1647, "else"
+       line 299, "pan.___", state 1649, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1649, "else"
+       line 411, "pan.___", state 1655, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1687, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1701, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1720, "(1)"
+       line 438, "pan.___", state 1750, "(1)"
+       line 442, "pan.___", state 1763, "(1)"
+       line 411, "pan.___", state 1784, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1816, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1830, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1849, "(1)"
+       line 438, "pan.___", state 1879, "(1)"
+       line 442, "pan.___", state 1892, "(1)"
+       line 411, "pan.___", state 1916, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1948, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1962, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1981, "(1)"
+       line 438, "pan.___", state 2011, "(1)"
+       line 442, "pan.___", state 2024, "(1)"
+       line 650, "pan.___", state 2045, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 2052, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2084, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2098, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2117, "(1)"
+       line 438, "pan.___", state 2147, "(1)"
+       line 442, "pan.___", state 2160, "(1)"
+       line 411, "pan.___", state 2181, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2213, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2227, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2246, "(1)"
+       line 438, "pan.___", state 2276, "(1)"
+       line 442, "pan.___", state 2289, "(1)"
+       line 411, "pan.___", state 2312, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2314, "(1)"
+       line 411, "pan.___", state 2315, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2315, "else"
+       line 411, "pan.___", state 2318, "(1)"
+       line 415, "pan.___", state 2326, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2328, "(1)"
+       line 415, "pan.___", state 2329, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2329, "else"
+       line 415, "pan.___", state 2332, "(1)"
+       line 415, "pan.___", state 2333, "(1)"
+       line 415, "pan.___", state 2333, "(1)"
+       line 413, "pan.___", state 2338, "((i<1))"
+       line 413, "pan.___", state 2338, "((i>=1))"
+       line 420, "pan.___", state 2344, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2346, "(1)"
+       line 420, "pan.___", state 2347, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2347, "else"
+       line 420, "pan.___", state 2350, "(1)"
+       line 420, "pan.___", state 2351, "(1)"
+       line 420, "pan.___", state 2351, "(1)"
+       line 424, "pan.___", state 2358, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2360, "(1)"
+       line 424, "pan.___", state 2361, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2361, "else"
+       line 424, "pan.___", state 2364, "(1)"
+       line 424, "pan.___", state 2365, "(1)"
+       line 424, "pan.___", state 2365, "(1)"
+       line 422, "pan.___", state 2370, "((i<2))"
+       line 422, "pan.___", state 2370, "((i>=2))"
+       line 429, "pan.___", state 2377, "(1)"
+       line 429, "pan.___", state 2378, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 2378, "else"
+       line 429, "pan.___", state 2381, "(1)"
+       line 429, "pan.___", state 2382, "(1)"
+       line 429, "pan.___", state 2382, "(1)"
+       line 433, "pan.___", state 2390, "(1)"
+       line 433, "pan.___", state 2391, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 2391, "else"
+       line 433, "pan.___", state 2394, "(1)"
+       line 433, "pan.___", state 2395, "(1)"
+       line 433, "pan.___", state 2395, "(1)"
+       line 431, "pan.___", state 2400, "((i<1))"
+       line 431, "pan.___", state 2400, "((i>=1))"
+       line 438, "pan.___", state 2407, "(1)"
+       line 438, "pan.___", state 2408, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 2408, "else"
+       line 438, "pan.___", state 2411, "(1)"
+       line 438, "pan.___", state 2412, "(1)"
+       line 438, "pan.___", state 2412, "(1)"
+       line 442, "pan.___", state 2420, "(1)"
+       line 442, "pan.___", state 2421, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 2421, "else"
+       line 442, "pan.___", state 2424, "(1)"
+       line 442, "pan.___", state 2425, "(1)"
+       line 442, "pan.___", state 2425, "(1)"
+       line 440, "pan.___", state 2430, "((i<2))"
+       line 440, "pan.___", state 2430, "((i>=2))"
+       line 450, "pan.___", state 2434, "(1)"
+       line 450, "pan.___", state 2434, "(1)"
+       line 650, "pan.___", state 2437, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 650, "pan.___", state 2438, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 650, "pan.___", state 2439, "(1)"
+       line 272, "pan.___", state 2443, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 2445, "(1)"
+       line 276, "pan.___", state 2452, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2454, "(1)"
+       line 276, "pan.___", state 2455, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 2455, "else"
+       line 274, "pan.___", state 2460, "((i<1))"
+       line 274, "pan.___", state 2460, "((i>=1))"
+       line 280, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2467, "(1)"
+       line 280, "pan.___", state 2468, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 2468, "else"
+       line 284, "pan.___", state 2474, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2476, "(1)"
+       line 284, "pan.___", state 2477, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 2477, "else"
+       line 282, "pan.___", state 2482, "((i<2))"
+       line 282, "pan.___", state 2482, "((i>=2))"
+       line 249, "pan.___", state 2490, "(1)"
+       line 253, "pan.___", state 2498, "(1)"
+       line 253, "pan.___", state 2499, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2499, "else"
+       line 251, "pan.___", state 2504, "((i<1))"
+       line 251, "pan.___", state 2504, "((i>=1))"
+       line 257, "pan.___", state 2510, "(1)"
+       line 257, "pan.___", state 2511, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2511, "else"
+       line 261, "pan.___", state 2518, "(1)"
+       line 261, "pan.___", state 2519, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2519, "else"
+       line 266, "pan.___", state 2528, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2528, "else"
+       line 299, "pan.___", state 2530, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 2530, "else"
+       line 411, "pan.___", state 2536, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2568, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2582, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2601, "(1)"
+       line 438, "pan.___", state 2631, "(1)"
+       line 442, "pan.___", state 2644, "(1)"
+       line 272, "pan.___", state 2668, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 2670, "(1)"
+       line 276, "pan.___", state 2677, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2679, "(1)"
+       line 276, "pan.___", state 2680, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 2680, "else"
+       line 274, "pan.___", state 2685, "((i<1))"
+       line 274, "pan.___", state 2685, "((i>=1))"
+       line 280, "pan.___", state 2690, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2692, "(1)"
+       line 280, "pan.___", state 2693, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 2693, "else"
+       line 284, "pan.___", state 2699, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2701, "(1)"
+       line 284, "pan.___", state 2702, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 2702, "else"
+       line 282, "pan.___", state 2707, "((i<2))"
+       line 282, "pan.___", state 2707, "((i>=2))"
+       line 249, "pan.___", state 2715, "(1)"
+       line 253, "pan.___", state 2723, "(1)"
+       line 253, "pan.___", state 2724, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2724, "else"
+       line 251, "pan.___", state 2729, "((i<1))"
+       line 251, "pan.___", state 2729, "((i>=1))"
+       line 257, "pan.___", state 2735, "(1)"
+       line 257, "pan.___", state 2736, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2736, "else"
+       line 261, "pan.___", state 2743, "(1)"
+       line 261, "pan.___", state 2744, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2744, "else"
+       line 266, "pan.___", state 2753, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2753, "else"
+       line 299, "pan.___", state 2755, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 2755, "else"
+       line 411, "pan.___", state 2761, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2793, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2807, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2826, "(1)"
+       line 438, "pan.___", state 2856, "(1)"
+       line 442, "pan.___", state 2869, "(1)"
+       line 411, "pan.___", state 2890, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2922, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2936, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2955, "(1)"
+       line 438, "pan.___", state 2985, "(1)"
+       line 442, "pan.___", state 2998, "(1)"
+       line 249, "pan.___", state 3031, "(1)"
+       line 257, "pan.___", state 3051, "(1)"
+       line 261, "pan.___", state 3059, "(1)"
+       line 249, "pan.___", state 3074, "(1)"
+       line 257, "pan.___", state 3094, "(1)"
+       line 261, "pan.___", state 3102, "(1)"
+       line 898, "pan.___", state 3119, "-end-"
+       (330 of 3119 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 20, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 26, "(1)"
+       line 415, "pan.___", state 34, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 40, "(1)"
+       line 415, "pan.___", state 41, "(1)"
+       line 415, "pan.___", state 41, "(1)"
+       line 413, "pan.___", state 46, "((i<1))"
+       line 413, "pan.___", state 46, "((i>=1))"
+       line 420, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 58, "(1)"
+       line 420, "pan.___", state 59, "(1)"
+       line 420, "pan.___", state 59, "(1)"
+       line 424, "pan.___", state 72, "(1)"
+       line 424, "pan.___", state 73, "(1)"
+       line 424, "pan.___", state 73, "(1)"
+       line 422, "pan.___", state 78, "((i<2))"
+       line 422, "pan.___", state 78, "((i>=2))"
+       line 429, "pan.___", state 85, "(1)"
+       line 429, "pan.___", state 86, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 86, "else"
+       line 429, "pan.___", state 89, "(1)"
+       line 429, "pan.___", state 90, "(1)"
+       line 429, "pan.___", state 90, "(1)"
+       line 433, "pan.___", state 98, "(1)"
+       line 433, "pan.___", state 99, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 99, "else"
+       line 433, "pan.___", state 102, "(1)"
+       line 433, "pan.___", state 103, "(1)"
+       line 433, "pan.___", state 103, "(1)"
+       line 431, "pan.___", state 108, "((i<1))"
+       line 431, "pan.___", state 108, "((i>=1))"
+       line 438, "pan.___", state 115, "(1)"
+       line 438, "pan.___", state 116, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 116, "else"
+       line 438, "pan.___", state 119, "(1)"
+       line 438, "pan.___", state 120, "(1)"
+       line 438, "pan.___", state 120, "(1)"
+       line 442, "pan.___", state 128, "(1)"
+       line 442, "pan.___", state 129, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 129, "else"
+       line 442, "pan.___", state 132, "(1)"
+       line 442, "pan.___", state 133, "(1)"
+       line 442, "pan.___", state 133, "(1)"
+       line 440, "pan.___", state 138, "((i<2))"
+       line 440, "pan.___", state 138, "((i>=2))"
+       line 450, "pan.___", state 142, "(1)"
+       line 450, "pan.___", state 142, "(1)"
+       line 272, "pan.___", state 151, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 160, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 168, "((i<1))"
+       line 274, "pan.___", state 168, "((i>=1))"
+       line 280, "pan.___", state 173, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 1021, "pan.___", state 201, "old_data = cached_rcu_ptr.val[_pid]"
+       line 1032, "pan.___", state 205, "_proc_urcu_writer = (_proc_urcu_writer|(1<<4))"
+       line 411, "pan.___", state 213, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 219, "(1)"
+       line 415, "pan.___", state 227, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 233, "(1)"
+       line 415, "pan.___", state 234, "(1)"
+       line 415, "pan.___", state 234, "(1)"
+       line 413, "pan.___", state 239, "((i<1))"
+       line 413, "pan.___", state 239, "((i>=1))"
+       line 420, "pan.___", state 247, "(1)"
+       line 420, "pan.___", state 248, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 248, "else"
+       line 420, "pan.___", state 251, "(1)"
+       line 420, "pan.___", state 252, "(1)"
+       line 420, "pan.___", state 252, "(1)"
+       line 424, "pan.___", state 259, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 265, "(1)"
+       line 424, "pan.___", state 266, "(1)"
+       line 424, "pan.___", state 266, "(1)"
+       line 422, "pan.___", state 271, "((i<2))"
+       line 422, "pan.___", state 271, "((i>=2))"
+       line 429, "pan.___", state 278, "(1)"
+       line 429, "pan.___", state 279, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 279, "else"
+       line 429, "pan.___", state 282, "(1)"
+       line 429, "pan.___", state 283, "(1)"
+       line 429, "pan.___", state 283, "(1)"
+       line 433, "pan.___", state 291, "(1)"
+       line 433, "pan.___", state 292, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 292, "else"
+       line 433, "pan.___", state 295, "(1)"
+       line 433, "pan.___", state 296, "(1)"
+       line 433, "pan.___", state 296, "(1)"
+       line 431, "pan.___", state 301, "((i<1))"
+       line 431, "pan.___", state 301, "((i>=1))"
+       line 438, "pan.___", state 308, "(1)"
+       line 438, "pan.___", state 309, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 309, "else"
+       line 438, "pan.___", state 312, "(1)"
+       line 438, "pan.___", state 313, "(1)"
+       line 438, "pan.___", state 313, "(1)"
+       line 442, "pan.___", state 321, "(1)"
+       line 442, "pan.___", state 322, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 322, "else"
+       line 442, "pan.___", state 325, "(1)"
+       line 442, "pan.___", state 326, "(1)"
+       line 442, "pan.___", state 326, "(1)"
+       line 440, "pan.___", state 331, "((i<2))"
+       line 440, "pan.___", state 331, "((i>=2))"
+       line 450, "pan.___", state 335, "(1)"
+       line 450, "pan.___", state 335, "(1)"
+       line 411, "pan.___", state 346, "(1)"
+       line 411, "pan.___", state 347, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 347, "else"
+       line 411, "pan.___", state 350, "(1)"
+       line 415, "pan.___", state 358, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 364, "(1)"
+       line 415, "pan.___", state 365, "(1)"
+       line 415, "pan.___", state 365, "(1)"
+       line 413, "pan.___", state 370, "((i<1))"
+       line 413, "pan.___", state 370, "((i>=1))"
+       line 420, "pan.___", state 376, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 382, "(1)"
+       line 420, "pan.___", state 383, "(1)"
+       line 420, "pan.___", state 383, "(1)"
+       line 424, "pan.___", state 390, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 396, "(1)"
+       line 424, "pan.___", state 397, "(1)"
+       line 424, "pan.___", state 397, "(1)"
+       line 422, "pan.___", state 402, "((i<2))"
+       line 422, "pan.___", state 402, "((i>=2))"
+       line 429, "pan.___", state 409, "(1)"
+       line 429, "pan.___", state 410, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 410, "else"
+       line 429, "pan.___", state 413, "(1)"
+       line 429, "pan.___", state 414, "(1)"
+       line 429, "pan.___", state 414, "(1)"
+       line 433, "pan.___", state 422, "(1)"
+       line 433, "pan.___", state 423, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 423, "else"
+       line 433, "pan.___", state 426, "(1)"
+       line 433, "pan.___", state 427, "(1)"
+       line 433, "pan.___", state 427, "(1)"
+       line 431, "pan.___", state 432, "((i<1))"
+       line 431, "pan.___", state 432, "((i>=1))"
+       line 438, "pan.___", state 439, "(1)"
+       line 438, "pan.___", state 440, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 440, "else"
+       line 438, "pan.___", state 443, "(1)"
+       line 438, "pan.___", state 444, "(1)"
+       line 438, "pan.___", state 444, "(1)"
+       line 442, "pan.___", state 452, "(1)"
+       line 442, "pan.___", state 453, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 453, "else"
+       line 442, "pan.___", state 456, "(1)"
+       line 442, "pan.___", state 457, "(1)"
+       line 442, "pan.___", state 457, "(1)"
+       line 440, "pan.___", state 462, "((i<2))"
+       line 440, "pan.___", state 462, "((i>=2))"
+       line 450, "pan.___", state 466, "(1)"
+       line 450, "pan.___", state 466, "(1)"
+       line 1086, "pan.___", state 477, "_proc_urcu_writer = (_proc_urcu_writer&~((1<<9)))"
+       line 1091, "pan.___", state 478, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<8)|(1<<7))))"
+       line 411, "pan.___", state 483, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 489, "(1)"
+       line 415, "pan.___", state 497, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 503, "(1)"
+       line 415, "pan.___", state 504, "(1)"
+       line 415, "pan.___", state 504, "(1)"
+       line 413, "pan.___", state 509, "((i<1))"
+       line 413, "pan.___", state 509, "((i>=1))"
+       line 420, "pan.___", state 515, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 521, "(1)"
+       line 420, "pan.___", state 522, "(1)"
+       line 420, "pan.___", state 522, "(1)"
+       line 424, "pan.___", state 529, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 535, "(1)"
+       line 424, "pan.___", state 536, "(1)"
+       line 424, "pan.___", state 536, "(1)"
+       line 422, "pan.___", state 541, "((i<2))"
+       line 422, "pan.___", state 541, "((i>=2))"
+       line 429, "pan.___", state 548, "(1)"
+       line 429, "pan.___", state 549, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 549, "else"
+       line 429, "pan.___", state 552, "(1)"
+       line 429, "pan.___", state 553, "(1)"
+       line 429, "pan.___", state 553, "(1)"
+       line 433, "pan.___", state 561, "(1)"
+       line 433, "pan.___", state 562, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 562, "else"
+       line 433, "pan.___", state 565, "(1)"
+       line 433, "pan.___", state 566, "(1)"
+       line 433, "pan.___", state 566, "(1)"
+       line 431, "pan.___", state 571, "((i<1))"
+       line 431, "pan.___", state 571, "((i>=1))"
+       line 438, "pan.___", state 578, "(1)"
+       line 438, "pan.___", state 579, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 579, "else"
+       line 438, "pan.___", state 582, "(1)"
+       line 438, "pan.___", state 583, "(1)"
+       line 438, "pan.___", state 583, "(1)"
+       line 442, "pan.___", state 591, "(1)"
+       line 442, "pan.___", state 592, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 592, "else"
+       line 442, "pan.___", state 595, "(1)"
+       line 442, "pan.___", state 596, "(1)"
+       line 442, "pan.___", state 596, "(1)"
+       line 450, "pan.___", state 605, "(1)"
+       line 450, "pan.___", state 605, "(1)"
+       line 411, "pan.___", state 612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 626, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 677, "(1)"
+       line 433, "pan.___", state 690, "(1)"
+       line 438, "pan.___", state 707, "(1)"
+       line 442, "pan.___", state 720, "(1)"
+       line 415, "pan.___", state 757, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 775, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 789, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 821, "(1)"
+       line 438, "pan.___", state 838, "(1)"
+       line 442, "pan.___", state 851, "(1)"
+       line 1168, "pan.___", state 878, "_proc_urcu_writer = (_proc_urcu_writer|(1<<13))"
+       line 272, "pan.___", state 906, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 908, "(1)"
+       line 276, "pan.___", state 915, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 917, "(1)"
+       line 276, "pan.___", state 918, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 918, "else"
+       line 274, "pan.___", state 923, "((i<1))"
+       line 274, "pan.___", state 923, "((i>=1))"
+       line 280, "pan.___", state 928, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 930, "(1)"
+       line 280, "pan.___", state 931, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 931, "else"
+       line 284, "pan.___", state 937, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 939, "(1)"
+       line 284, "pan.___", state 940, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 940, "else"
+       line 282, "pan.___", state 945, "((i<2))"
+       line 282, "pan.___", state 945, "((i>=2))"
+       line 249, "pan.___", state 953, "(1)"
+       line 253, "pan.___", state 961, "(1)"
+       line 253, "pan.___", state 962, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 962, "else"
+       line 251, "pan.___", state 967, "((i<1))"
+       line 251, "pan.___", state 967, "((i>=1))"
+       line 257, "pan.___", state 973, "(1)"
+       line 257, "pan.___", state 974, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 974, "else"
+       line 261, "pan.___", state 981, "(1)"
+       line 261, "pan.___", state 982, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 982, "else"
+       line 266, "pan.___", state 991, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 991, "else"
+       line 299, "pan.___", state 993, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 993, "else"
+       line 272, "pan.___", state 997, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 999, "(1)"
+       line 276, "pan.___", state 1006, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1008, "(1)"
+       line 276, "pan.___", state 1009, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1009, "else"
+       line 274, "pan.___", state 1014, "((i<1))"
+       line 274, "pan.___", state 1014, "((i>=1))"
+       line 280, "pan.___", state 1019, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1021, "(1)"
+       line 280, "pan.___", state 1022, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1022, "else"
+       line 284, "pan.___", state 1028, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1030, "(1)"
+       line 284, "pan.___", state 1031, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1031, "else"
+       line 282, "pan.___", state 1036, "((i<2))"
+       line 282, "pan.___", state 1036, "((i>=2))"
+       line 249, "pan.___", state 1044, "(1)"
+       line 253, "pan.___", state 1052, "(1)"
+       line 253, "pan.___", state 1053, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1053, "else"
+       line 251, "pan.___", state 1058, "((i<1))"
+       line 251, "pan.___", state 1058, "((i>=1))"
+       line 257, "pan.___", state 1064, "(1)"
+       line 257, "pan.___", state 1065, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1065, "else"
+       line 261, "pan.___", state 1072, "(1)"
+       line 261, "pan.___", state 1073, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1073, "else"
+       line 266, "pan.___", state 1082, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1082, "else"
+       line 299, "pan.___", state 1084, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1084, "else"
+       line 276, "pan.___", state 1097, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1110, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1119, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1135, "(1)"
+       line 253, "pan.___", state 1143, "(1)"
+       line 257, "pan.___", state 1155, "(1)"
+       line 261, "pan.___", state 1163, "(1)"
+       line 272, "pan.___", state 1179, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1181, "(1)"
+       line 276, "pan.___", state 1188, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1190, "(1)"
+       line 276, "pan.___", state 1191, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1191, "else"
+       line 274, "pan.___", state 1196, "((i<1))"
+       line 274, "pan.___", state 1196, "((i>=1))"
+       line 280, "pan.___", state 1201, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1203, "(1)"
+       line 280, "pan.___", state 1204, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1204, "else"
+       line 284, "pan.___", state 1210, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1212, "(1)"
+       line 284, "pan.___", state 1213, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1213, "else"
+       line 282, "pan.___", state 1218, "((i<2))"
+       line 282, "pan.___", state 1218, "((i>=2))"
+       line 249, "pan.___", state 1226, "(1)"
+       line 253, "pan.___", state 1234, "(1)"
+       line 253, "pan.___", state 1235, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1235, "else"
+       line 251, "pan.___", state 1240, "((i<1))"
+       line 251, "pan.___", state 1240, "((i>=1))"
+       line 257, "pan.___", state 1246, "(1)"
+       line 257, "pan.___", state 1247, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1247, "else"
+       line 261, "pan.___", state 1254, "(1)"
+       line 261, "pan.___", state 1255, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1255, "else"
+       line 266, "pan.___", state 1264, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1264, "else"
+       line 299, "pan.___", state 1266, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1266, "else"
+       line 1237, "pan.___", state 1269, "-end-"
+       (227 of 1269 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 3.93e+03 seconds
+pan: rate 1735.2351 states/second
+pan: avg transition delay 1.2255e-06 usec
+cp .input.spin urcu_free_no_mb.spin.input
+cp .input.spin.trail urcu_free_no_mb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.spin.input
new file mode 100644 (file)
index 0000000..ba5712c
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_MB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
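+/*
+ * Note on the encoding above: the bits covered by RCU_GP_CTR_NEST_MASK hold
+ * the read-side nesting count (see the !(tmp & RCU_GP_CTR_NEST_MASK) test in
+ * PROCEDURE_READ_LOCK below), while RCU_GP_CTR_BIT serves as the grace-period
+ * phase bit.
+ */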
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits act as triggers to execute the instructions that
+ * take those variables as input; leaving bits active inhibits instruction
+ * execution. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
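+
+/*
+ * Illustrative usage sketch (hypothetical token names): an instruction B that
+ * consumes the result of instruction A is guarded so that it can only fire
+ * once A's token has been produced; producing B's own token afterwards both
+ * publishes B's result and inhibits re-execution of B:
+ *
+ *     :: CONSUME_TOKENS(flow, TOKEN_A, TOKEN_B) ->
+ *             ...execute B...
+ *             PRODUCE_TOKENS(flow, TOKEN_B);
+ */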
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it remains when multiple writes
+ * must target the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU, but they can still
+ * be reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
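+
+/*
+ * Illustrative sketch of the dependency types above (plain C, hypothetical
+ * variables a, b, x):
+ *
+ *     a = x;          // (1)
+ *     b = a + 1;      // (2) RAW: (2) reads 'a' written by (1)
+ *     a = 0;          // (3) WAR: (3) overwrites 'a' after (2) read it
+ *     a = 1;          // (4) WAW: (4) overwrites the value written by (3)
+ */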
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not update other caches if the cache is dirty.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
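+
+/*
+ * Illustrative sketch (hypothetical variable 'foo'): DECLARE_CACHED_VAR(byte, foo)
+ * creates mem_foo (main memory), cached_foo (one cached copy per process) and
+ * cache_dirty_foo (one dirty bit per process). WRITE_CACHED_VAR(foo, 1)
+ * updates only the writer's cached copy and marks it dirty; the value reaches
+ * mem_foo when CACHE_WRITE_TO_MEM(foo, id) runs, either from smp_wmb()/smp_mb()
+ * or at random from ooo_mem(). Other processes observe the new value once
+ * their own cache is refreshed by CACHE_READ_FROM_MEM(foo, id).
+ */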
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
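+
+/*
+ * A C-level sketch of the read-lock logic modeled by the token flow above,
+ * reconstructed from the statements in this macro ('self' denotes the
+ * reader's index):
+ *
+ *     tmp = urcu_active_readers[self];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[self] = urcu_gp_ctr;   // outermost: snapshot gp ctr
+ *     else
+ *             urcu_active_readers[self] = tmp + 1;       // nested: increment count
+ */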
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
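+
+/*
+ * C-level sketch of the unlock logic modeled above, reconstructed from the
+ * statements in this macro: the reader's nesting count is simply decremented,
+ * i.e. urcu_active_readers[self] = urcu_active_readers[self] - 1;
+ */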
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
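+
+       /*
+        * Note: pre-producing the memory-barrier tokens above (for NO_MB, and
+        * for REMOTE_BARRIERS where barriers are instead handled by
+        * smp_mb_recv()) leaves those bits active, which inhibits the
+        * smp_mb_reader() steps below; the read-side barriers are thus removed
+        * from this execution.
+        */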
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution into the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given the validation
+                * checks whether the data entry that was read is poisoned, it
+                * is ok if we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep init after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
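+
+/*
+ * Illustrative sketch only -- not part of the verified model above.  It shows,
+ * in a few lines, the dataflow/token scheme the reader and writer use: each
+ * pseudo-instruction fires once its dependency tokens are present and then
+ * produces its own token, which is what lets Spin explore the weakly-ordered
+ * schedules permitted by the declared dependencies.  All names below
+ * (TOK_*, demo_flow, sketch_ooo) are made up for this sketch and do not exist
+ * in the model.
+ */
+#define TOK_LOAD       (1 << 0)
+#define TOK_STORE      (1 << 1)
+#define TOK_MB         (1 << 2)
+#define TOK_ALL        (TOK_LOAD | TOK_STORE | TOK_MB)
+
+int demo_flow = 0;
+
+active proctype sketch_ooo()
+{
+       do
+       :: atomic {
+               if
+               /* The load and the store do not depend on each other :
+                * they may fire in either order. */
+               :: !(demo_flow & TOK_LOAD)  -> demo_flow = demo_flow | TOK_LOAD;
+               :: !(demo_flow & TOK_STORE) -> demo_flow = demo_flow | TOK_STORE;
+               /* The barrier may only fire once both have completed. */
+               :: (demo_flow & (TOK_LOAD | TOK_STORE)) == (TOK_LOAD | TOK_STORE)
+                       && !(demo_flow & TOK_MB) ->
+                       demo_flow = demo_flow | TOK_MB;
+               :: (demo_flow & TOK_ALL) == TOK_ALL -> break;
+               fi;
+          }
+       od;
+}
+/*
+ * Simulating the sketch interactively (spin -i) lets one pick either order
+ * for the load and the store before the barrier, which is essentially the
+ * freedom the CONSUME_TOKENS()/PRODUCE_TOKENS() pairs above give each reader
+ * and writer step until a barrier token is produced.
+ */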
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_mb.spin.input.trail
new file mode 100644 (file)
index 0000000..fbd72f1
--- /dev/null
@@ -0,0 +1,1270 @@
+-2:3:-2
+-4:-4:-4
+1:0:4468
+2:3:4388
+3:3:4391
+4:3:4391
+5:3:4394
+6:3:4402
+7:3:4402
+8:3:4405
+9:3:4411
+10:3:4415
+11:3:4415
+12:3:4418
+13:3:4428
+14:3:4436
+15:3:4436
+16:3:4439
+17:3:4445
+18:3:4449
+19:3:4449
+20:3:4452
+21:3:4458
+22:3:4462
+23:3:4463
+24:0:4468
+25:3:4465
+26:0:4468
+27:2:3121
+28:0:4468
+29:2:3127
+30:0:4468
+31:2:3128
+32:0:4468
+33:2:3130
+34:0:4468
+35:2:3131
+36:0:4468
+37:2:3132
+38:0:4468
+39:2:3133
+40:0:4468
+41:2:3134
+42:2:3135
+43:2:3139
+44:2:3140
+45:2:3148
+46:2:3149
+47:2:3153
+48:2:3154
+49:2:3162
+50:2:3167
+51:2:3171
+52:2:3172
+53:2:3180
+54:2:3181
+55:2:3185
+56:2:3186
+57:2:3180
+58:2:3181
+59:2:3185
+60:2:3186
+61:2:3194
+62:2:3199
+63:2:3200
+64:2:3211
+65:2:3212
+66:2:3213
+67:2:3224
+68:2:3229
+69:2:3230
+70:2:3241
+71:2:3242
+72:2:3243
+73:2:3241
+74:2:3242
+75:2:3243
+76:2:3254
+77:2:3262
+78:0:4468
+79:2:3133
+80:0:4468
+81:2:3266
+82:2:3270
+83:2:3271
+84:2:3275
+85:2:3279
+86:2:3280
+87:2:3284
+88:2:3292
+89:2:3293
+90:2:3297
+91:2:3301
+92:2:3302
+93:2:3297
+94:2:3298
+95:2:3306
+96:0:4468
+97:2:3133
+98:0:4468
+99:2:3314
+100:2:3315
+101:2:3316
+102:0:4468
+103:2:3133
+104:0:4468
+105:2:3324
+106:0:4468
+107:2:3133
+108:0:4468
+109:2:3327
+110:2:3328
+111:2:3332
+112:2:3333
+113:2:3341
+114:2:3342
+115:2:3346
+116:2:3347
+117:2:3355
+118:2:3360
+119:2:3361
+120:2:3373
+121:2:3374
+122:2:3378
+123:2:3379
+124:2:3373
+125:2:3374
+126:2:3378
+127:2:3379
+128:2:3387
+129:2:3392
+130:2:3393
+131:2:3404
+132:2:3405
+133:2:3406
+134:2:3417
+135:2:3422
+136:2:3423
+137:2:3434
+138:2:3435
+139:2:3436
+140:2:3434
+141:2:3435
+142:2:3436
+143:2:3447
+144:2:3454
+145:0:4468
+146:2:3133
+147:0:4468
+148:2:3458
+149:2:3459
+150:2:3460
+151:2:3472
+152:2:3473
+153:2:3477
+154:2:3478
+155:2:3486
+156:2:3491
+157:2:3495
+158:2:3496
+159:2:3504
+160:2:3505
+161:2:3509
+162:2:3510
+163:2:3504
+164:2:3505
+165:2:3509
+166:2:3510
+167:2:3518
+168:2:3523
+169:2:3524
+170:2:3535
+171:2:3536
+172:2:3537
+173:2:3548
+174:2:3553
+175:2:3554
+176:2:3565
+177:2:3566
+178:2:3567
+179:2:3565
+180:2:3566
+181:2:3567
+182:2:3578
+183:2:3589
+184:2:3590
+185:0:4468
+186:2:3133
+187:0:4468
+188:2:3597
+189:2:3598
+190:2:3602
+191:2:3603
+192:2:3611
+193:2:3612
+194:2:3616
+195:2:3617
+196:2:3625
+197:2:3630
+198:2:3634
+199:2:3635
+200:2:3643
+201:2:3644
+202:2:3648
+203:2:3649
+204:2:3643
+205:2:3644
+206:2:3648
+207:2:3649
+208:2:3657
+209:2:3662
+210:2:3663
+211:2:3674
+212:2:3675
+213:2:3676
+214:2:3687
+215:2:3692
+216:2:3693
+217:2:3704
+218:2:3705
+219:2:3706
+220:2:3704
+221:2:3705
+222:2:3706
+223:2:3717
+224:0:4468
+225:2:3133
+226:0:4468
+227:2:3726
+228:2:3727
+229:2:3731
+230:2:3732
+231:2:3740
+232:2:3741
+233:2:3745
+234:2:3746
+235:2:3754
+236:2:3759
+237:2:3763
+238:2:3764
+239:2:3772
+240:2:3773
+241:2:3777
+242:2:3778
+243:2:3772
+244:2:3773
+245:2:3777
+246:2:3778
+247:2:3786
+248:2:3791
+249:2:3792
+250:2:3803
+251:2:3804
+252:2:3805
+253:2:3816
+254:2:3821
+255:2:3822
+256:2:3833
+257:2:3834
+258:2:3835
+259:2:3833
+260:2:3834
+261:2:3835
+262:2:3846
+263:2:3853
+264:0:4468
+265:2:3133
+266:0:4468
+267:2:3857
+268:2:3858
+269:2:3859
+270:2:3871
+271:2:3872
+272:2:3876
+273:2:3877
+274:2:3885
+275:2:3890
+276:2:3894
+277:2:3895
+278:2:3903
+279:2:3904
+280:2:3908
+281:2:3909
+282:2:3903
+283:2:3904
+284:2:3908
+285:2:3909
+286:2:3917
+287:2:3922
+288:2:3923
+289:2:3934
+290:2:3935
+291:2:3936
+292:2:3947
+293:2:3952
+294:2:3953
+295:2:3964
+296:2:3965
+297:2:3966
+298:2:3964
+299:2:3965
+300:2:3966
+301:2:3977
+302:2:3987
+303:2:3988
+304:0:4468
+305:2:3133
+306:0:4468
+307:2:3997
+308:2:3998
+309:0:4468
+310:2:3133
+311:0:4468
+312:2:4002
+313:0:4468
+314:2:4010
+315:0:4468
+316:2:3128
+317:0:4468
+318:2:3130
+319:0:4468
+320:2:3131
+321:0:4468
+322:2:3132
+323:0:4468
+324:2:3133
+325:0:4468
+326:2:3134
+327:2:3135
+328:2:3139
+329:2:3140
+330:2:3148
+331:2:3149
+332:2:3153
+333:2:3154
+334:2:3162
+335:2:3167
+336:2:3171
+337:2:3172
+338:2:3180
+339:2:3181
+340:2:3182
+341:2:3180
+342:2:3181
+343:2:3185
+344:2:3186
+345:2:3194
+346:2:3199
+347:2:3200
+348:2:3211
+349:2:3212
+350:2:3213
+351:2:3224
+352:2:3229
+353:2:3230
+354:2:3241
+355:2:3242
+356:2:3243
+357:2:3241
+358:2:3242
+359:2:3243
+360:2:3254
+361:2:3262
+362:0:4468
+363:2:3133
+364:0:4468
+365:2:3266
+366:2:3270
+367:2:3271
+368:2:3275
+369:2:3279
+370:2:3280
+371:2:3284
+372:2:3292
+373:2:3293
+374:2:3297
+375:2:3298
+376:2:3297
+377:2:3301
+378:2:3302
+379:2:3306
+380:0:4468
+381:2:3133
+382:0:4468
+383:2:3314
+384:2:3315
+385:2:3316
+386:0:4468
+387:2:3133
+388:0:4468
+389:2:3324
+390:0:4468
+391:2:3133
+392:0:4468
+393:2:3327
+394:2:3328
+395:2:3332
+396:2:3333
+397:2:3341
+398:2:3342
+399:2:3346
+400:2:3347
+401:2:3355
+402:2:3360
+403:2:3361
+404:2:3373
+405:2:3374
+406:2:3378
+407:2:3379
+408:2:3373
+409:2:3374
+410:2:3378
+411:2:3379
+412:2:3387
+413:2:3392
+414:2:3393
+415:2:3404
+416:2:3405
+417:2:3406
+418:2:3417
+419:2:3422
+420:2:3423
+421:2:3434
+422:2:3435
+423:2:3436
+424:2:3434
+425:2:3435
+426:2:3436
+427:2:3447
+428:2:3454
+429:0:4468
+430:2:3133
+431:0:4468
+432:2:3458
+433:2:3459
+434:2:3460
+435:2:3472
+436:2:3473
+437:2:3477
+438:2:3478
+439:2:3486
+440:2:3491
+441:2:3495
+442:2:3496
+443:2:3504
+444:2:3505
+445:2:3509
+446:2:3510
+447:2:3504
+448:2:3505
+449:2:3509
+450:2:3510
+451:2:3518
+452:2:3523
+453:2:3524
+454:2:3535
+455:2:3536
+456:2:3537
+457:2:3548
+458:2:3553
+459:2:3554
+460:2:3565
+461:2:3566
+462:2:3567
+463:2:3565
+464:2:3566
+465:2:3567
+466:2:3578
+467:2:3589
+468:2:3590
+469:0:4468
+470:2:3133
+471:0:4468
+472:2:3597
+473:2:3598
+474:2:3602
+475:2:3603
+476:2:3611
+477:2:3612
+478:2:3616
+479:2:3617
+480:2:3625
+481:2:3630
+482:2:3634
+483:2:3635
+484:2:3643
+485:2:3644
+486:2:3648
+487:2:3649
+488:2:3643
+489:2:3644
+490:2:3648
+491:2:3649
+492:2:3657
+493:2:3662
+494:2:3663
+495:2:3674
+496:2:3675
+497:2:3676
+498:2:3687
+499:2:3692
+500:2:3693
+501:2:3704
+502:2:3705
+503:2:3706
+504:2:3704
+505:2:3705
+506:2:3706
+507:2:3717
+508:0:4468
+509:2:3133
+510:0:4468
+511:2:3726
+512:2:3727
+513:2:3731
+514:2:3732
+515:2:3740
+516:2:3741
+517:2:3745
+518:2:3746
+519:2:3754
+520:2:3759
+521:2:3763
+522:2:3764
+523:2:3772
+524:2:3773
+525:2:3777
+526:2:3778
+527:2:3772
+528:2:3773
+529:2:3777
+530:2:3778
+531:2:3786
+532:2:3791
+533:2:3792
+534:2:3803
+535:2:3804
+536:2:3805
+537:2:3816
+538:2:3821
+539:2:3822
+540:2:3833
+541:2:3834
+542:2:3835
+543:2:3833
+544:2:3834
+545:2:3835
+546:2:3846
+547:2:3853
+548:0:4468
+549:2:3133
+550:0:4468
+551:2:3857
+552:2:3858
+553:2:3859
+554:2:3871
+555:2:3872
+556:2:3876
+557:2:3877
+558:2:3885
+559:2:3890
+560:2:3894
+561:2:3895
+562:2:3903
+563:2:3904
+564:2:3908
+565:2:3909
+566:2:3903
+567:2:3904
+568:2:3908
+569:2:3909
+570:2:3917
+571:2:3922
+572:2:3923
+573:2:3934
+574:2:3935
+575:2:3936
+576:2:3947
+577:2:3952
+578:2:3953
+579:2:3964
+580:2:3965
+581:2:3966
+582:2:3964
+583:2:3965
+584:2:3966
+585:2:3977
+586:2:3987
+587:2:3988
+588:0:4468
+589:2:3133
+590:0:4468
+591:2:3997
+592:2:3998
+593:0:4468
+594:2:3133
+595:0:4468
+596:2:4002
+597:0:4468
+598:2:4010
+599:0:4468
+600:2:3128
+601:0:4468
+602:2:3130
+603:0:4468
+604:2:3131
+605:0:4468
+606:2:3132
+607:0:4468
+608:2:3133
+609:0:4468
+610:2:3134
+611:2:3135
+612:2:3139
+613:2:3140
+614:2:3148
+615:2:3149
+616:2:3153
+617:2:3154
+618:2:3162
+619:2:3167
+620:2:3171
+621:2:3172
+622:2:3180
+623:2:3181
+624:2:3185
+625:2:3186
+626:2:3180
+627:2:3181
+628:2:3182
+629:2:3194
+630:2:3199
+631:2:3200
+632:2:3211
+633:2:3212
+634:2:3213
+635:2:3224
+636:2:3229
+637:2:3230
+638:2:3241
+639:2:3242
+640:2:3243
+641:2:3241
+642:2:3242
+643:2:3243
+644:2:3254
+645:2:3262
+646:0:4468
+647:2:3133
+648:0:4468
+649:2:3266
+650:2:3270
+651:2:3271
+652:2:3275
+653:2:3279
+654:2:3280
+655:2:3284
+656:2:3292
+657:2:3293
+658:2:3297
+659:2:3301
+660:2:3302
+661:2:3297
+662:2:3298
+663:2:3306
+664:0:4468
+665:2:3133
+666:0:4468
+667:2:3314
+668:2:3315
+669:2:3316
+670:0:4468
+671:2:3133
+672:0:4468
+673:2:3324
+674:0:4468
+675:2:3133
+676:0:4468
+677:2:3327
+678:2:3328
+679:2:3332
+680:2:3333
+681:2:3341
+682:2:3342
+683:2:3346
+684:2:3347
+685:2:3355
+686:2:3360
+687:2:3361
+688:2:3373
+689:2:3374
+690:2:3378
+691:2:3379
+692:2:3373
+693:2:3374
+694:2:3378
+695:2:3379
+696:2:3387
+697:2:3392
+698:2:3393
+699:2:3404
+700:2:3405
+701:2:3406
+702:2:3417
+703:2:3422
+704:2:3423
+705:2:3434
+706:2:3435
+707:2:3436
+708:2:3434
+709:2:3435
+710:2:3436
+711:2:3447
+712:2:3454
+713:0:4468
+714:2:3133
+715:0:4468
+716:2:3458
+717:2:3459
+718:2:3460
+719:2:3472
+720:2:3473
+721:2:3477
+722:2:3478
+723:2:3486
+724:2:3491
+725:2:3495
+726:2:3496
+727:2:3504
+728:2:3505
+729:2:3509
+730:2:3510
+731:2:3504
+732:2:3505
+733:2:3509
+734:2:3510
+735:2:3518
+736:2:3523
+737:2:3524
+738:2:3535
+739:2:3536
+740:2:3537
+741:2:3548
+742:2:3553
+743:2:3554
+744:2:3565
+745:2:3566
+746:2:3567
+747:2:3565
+748:2:3566
+749:2:3567
+750:2:3578
+751:2:3589
+752:2:3590
+753:0:4468
+754:2:3133
+755:0:4468
+756:2:3597
+757:2:3598
+758:2:3602
+759:2:3603
+760:2:3611
+761:2:3612
+762:2:3616
+763:2:3617
+764:2:3625
+765:2:3630
+766:2:3634
+767:2:3635
+768:2:3643
+769:2:3644
+770:2:3648
+771:2:3649
+772:2:3643
+773:2:3644
+774:2:3648
+775:2:3649
+776:2:3657
+777:2:3662
+778:2:3663
+779:2:3674
+780:2:3675
+781:2:3676
+782:2:3687
+783:2:3692
+784:2:3693
+785:2:3704
+786:2:3705
+787:2:3706
+788:2:3704
+789:2:3705
+790:2:3706
+791:2:3717
+792:0:4468
+793:2:3133
+794:0:4468
+795:2:3857
+796:2:3858
+797:2:3862
+798:2:3863
+799:2:3871
+800:2:3872
+801:2:3876
+802:2:3877
+803:2:3885
+804:2:3890
+805:2:3894
+806:2:3895
+807:2:3903
+808:2:3904
+809:2:3908
+810:2:3909
+811:2:3903
+812:2:3904
+813:2:3908
+814:2:3909
+815:2:3917
+816:2:3922
+817:2:3923
+818:2:3934
+819:2:3935
+820:2:3936
+821:2:3947
+822:2:3952
+823:2:3953
+824:2:3964
+825:2:3965
+826:2:3966
+827:2:3964
+828:2:3965
+829:2:3966
+830:2:3977
+831:2:3987
+832:2:3988
+833:0:4468
+834:2:3133
+835:0:4468
+836:2:3997
+837:2:3998
+838:0:4468
+839:2:3133
+840:0:4468
+841:2:3726
+842:2:3727
+843:2:3731
+844:2:3732
+845:2:3740
+846:2:3741
+847:2:3745
+848:2:3746
+849:2:3754
+850:2:3759
+851:2:3763
+852:2:3764
+853:2:3772
+854:2:3773
+855:2:3774
+856:2:3772
+857:2:3773
+858:2:3777
+859:2:3778
+860:2:3786
+861:2:3791
+862:2:3792
+863:2:3803
+864:2:3804
+865:2:3805
+866:2:3816
+867:2:3821
+868:2:3822
+869:2:3833
+870:2:3834
+871:2:3835
+872:2:3833
+873:2:3834
+874:2:3835
+875:2:3846
+876:2:3853
+877:0:4468
+878:2:3133
+879:0:4468
+880:2:4002
+881:0:4468
+882:2:4010
+883:0:4468
+884:2:4011
+885:0:4468
+886:2:4016
+887:0:4468
+888:1:2
+889:0:4468
+890:2:4017
+891:0:4468
+892:1:8
+893:0:4468
+894:2:4016
+895:0:4468
+896:1:9
+897:0:4468
+898:2:4017
+899:0:4468
+900:1:10
+901:0:4468
+902:2:4016
+903:0:4468
+904:1:11
+905:0:4468
+906:2:4017
+907:0:4468
+908:1:12
+909:0:4468
+910:2:4016
+911:0:4468
+912:1:13
+913:0:4468
+914:2:4017
+915:0:4468
+916:1:14
+917:0:4468
+918:2:4016
+919:0:4468
+920:1:15
+921:0:4468
+922:2:4017
+923:0:4468
+924:1:16
+925:1:17
+926:1:21
+927:1:22
+928:1:30
+929:1:31
+930:1:35
+931:1:36
+932:1:44
+933:1:49
+934:1:53
+935:1:54
+936:1:62
+937:1:63
+938:1:67
+939:1:68
+940:1:62
+941:1:63
+942:1:67
+943:1:68
+944:1:76
+945:1:81
+946:1:82
+947:1:93
+948:1:94
+949:1:95
+950:1:106
+951:1:118
+952:1:119
+953:1:123
+954:1:124
+955:1:125
+956:1:123
+957:1:124
+958:1:125
+959:1:136
+960:0:4468
+961:2:4016
+962:0:4468
+963:1:15
+964:0:4468
+965:2:4017
+966:0:4468
+967:1:145
+968:1:146
+969:0:4468
+970:2:4016
+971:0:4468
+972:1:15
+973:0:4468
+974:2:4017
+975:0:4468
+976:1:152
+977:1:153
+978:1:157
+979:1:158
+980:1:166
+981:1:167
+982:1:171
+983:1:172
+984:1:180
+985:1:185
+986:1:189
+987:1:190
+988:1:198
+989:1:199
+990:1:203
+991:1:204
+992:1:198
+993:1:199
+994:1:203
+995:1:204
+996:1:212
+997:1:217
+998:1:218
+999:1:229
+1000:1:230
+1001:1:231
+1002:1:242
+1003:1:254
+1004:1:255
+1005:1:259
+1006:1:260
+1007:1:261
+1008:1:259
+1009:1:260
+1010:1:261
+1011:1:272
+1012:0:4468
+1013:2:4016
+1014:0:4468
+1015:1:15
+1016:0:4468
+1017:2:4017
+1018:0:4468
+1019:1:281
+1020:1:282
+1021:1:286
+1022:1:287
+1023:1:295
+1024:1:296
+1025:1:300
+1026:1:301
+1027:1:309
+1028:1:314
+1029:1:318
+1030:1:319
+1031:1:327
+1032:1:328
+1033:1:332
+1034:1:333
+1035:1:327
+1036:1:328
+1037:1:332
+1038:1:333
+1039:1:341
+1040:1:346
+1041:1:347
+1042:1:358
+1043:1:359
+1044:1:360
+1045:1:371
+1046:1:383
+1047:1:384
+1048:1:388
+1049:1:389
+1050:1:390
+1051:1:388
+1052:1:389
+1053:1:390
+1054:1:401
+1055:1:408
+1056:0:4468
+1057:2:4016
+1058:0:4468
+1059:1:15
+1060:0:4468
+1061:2:4017
+1062:0:4468
+1063:1:636
+1064:1:637
+1065:1:641
+1066:1:642
+1067:1:650
+1068:1:651
+1069:1:652
+1070:1:664
+1071:1:669
+1072:1:673
+1073:1:674
+1074:1:682
+1075:1:683
+1076:1:687
+1077:1:688
+1078:1:682
+1079:1:683
+1080:1:687
+1081:1:688
+1082:1:696
+1083:1:701
+1084:1:702
+1085:1:713
+1086:1:714
+1087:1:715
+1088:1:726
+1089:1:738
+1090:1:739
+1091:1:743
+1092:1:744
+1093:1:745
+1094:1:743
+1095:1:744
+1096:1:745
+1097:1:756
+1098:0:4468
+1099:2:4016
+1100:0:4468
+1101:1:15
+1102:0:4468
+1103:2:4017
+1104:0:4468
+1105:1:765
+1106:1:768
+1107:1:769
+1108:0:4468
+1109:2:4016
+1110:0:4468
+1111:1:15
+1112:0:4468
+1113:2:4017
+1114:0:4468
+1115:1:772
+1116:1:773
+1117:1:777
+1118:1:778
+1119:1:786
+1120:1:787
+1121:1:791
+1122:1:792
+1123:1:800
+1124:1:805
+1125:1:809
+1126:1:810
+1127:1:818
+1128:1:819
+1129:1:823
+1130:1:824
+1131:1:818
+1132:1:819
+1133:1:823
+1134:1:824
+1135:1:832
+1136:1:837
+1137:1:838
+1138:1:849
+1139:1:850
+1140:1:851
+1141:1:862
+1142:1:874
+1143:1:875
+1144:1:879
+1145:1:880
+1146:1:881
+1147:1:879
+1148:1:880
+1149:1:881
+1150:1:892
+1151:0:4468
+1152:2:4016
+1153:0:4468
+1154:1:15
+1155:0:4468
+1156:2:4017
+1157:0:4468
+1158:1:1032
+1159:1:1033
+1160:1:1037
+1161:1:1038
+1162:1:1046
+1163:1:1047
+1164:1:1051
+1165:1:1052
+1166:1:1060
+1167:1:1065
+1168:1:1069
+1169:1:1070
+1170:1:1078
+1171:1:1079
+1172:1:1083
+1173:1:1084
+1174:1:1078
+1175:1:1079
+1176:1:1083
+1177:1:1084
+1178:1:1092
+1179:1:1097
+1180:1:1098
+1181:1:1109
+1182:1:1110
+1183:1:1111
+1184:1:1122
+1185:1:1134
+1186:1:1135
+1187:1:1139
+1188:1:1140
+1189:1:1141
+1190:1:1139
+1191:1:1140
+1192:1:1141
+1193:1:1152
+1194:1:1159
+1195:1:1163
+1196:0:4468
+1197:2:4016
+1198:0:4468
+1199:1:15
+1200:0:4468
+1201:2:4017
+1202:0:4468
+1203:1:1164
+1204:1:1165
+1205:1:1169
+1206:1:1170
+1207:1:1178
+1208:1:1179
+1209:1:1180
+1210:1:1192
+1211:1:1197
+1212:1:1201
+1213:1:1202
+1214:1:1210
+1215:1:1211
+1216:1:1215
+1217:1:1216
+1218:1:1210
+1219:1:1211
+1220:1:1215
+1221:1:1216
+1222:1:1224
+1223:1:1229
+1224:1:1230
+1225:1:1241
+1226:1:1242
+1227:1:1243
+1228:1:1254
+1229:1:1266
+1230:1:1267
+1231:1:1271
+1232:1:1272
+1233:1:1273
+1234:1:1271
+1235:1:1272
+1236:1:1273
+1237:1:1284
+1238:0:4468
+1239:2:4016
+1240:0:4468
+1241:1:15
+1242:0:4468
+1243:2:4017
+1244:0:4468
+1245:1:1293
+1246:0:4468
+1247:2:4016
+1248:0:4468
+1249:1:3027
+1250:1:3034
+1251:1:3035
+1252:1:3042
+1253:1:3047
+1254:1:3054
+1255:1:3055
+1256:1:3054
+1257:1:3055
+1258:1:3062
+1259:1:3066
+1260:0:4468
+1261:2:4017
+1262:0:4468
+1263:1:1295
+1264:1:1296
+1265:0:4466
+1266:2:4016
+1267:0:4472
+1268:1:756
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.log
new file mode 100644 (file)
index 0000000..3da0a27
--- /dev/null
@@ -0,0 +1,536 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_rmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+pan: claim violated! (at depth 2092)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5332, errors: 1
+   615700 states, stored
+3.8337558e+08 states, matched
+3.8399128e+08 transitions (= stored+matched)
+2.147943e+09 atomic steps
+hash conflicts:  90741041 (resolved)
+
+Stats on memory usage (in Megabytes):
+   68.113      equivalent memory usage for states (stored*(State-vector + overhead))
+   52.415      actual memory usage for states (compression: 76.95%)
+               state-vector as stored = 61 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  518.108      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 82, "(1)"
+       line 438, "pan.___", state 112, "(1)"
+       line 442, "pan.___", state 125, "(1)"
+       line 597, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 218, "(1)"
+       line 438, "pan.___", state 248, "(1)"
+       line 442, "pan.___", state 261, "(1)"
+       line 411, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 347, "(1)"
+       line 438, "pan.___", state 377, "(1)"
+       line 442, "pan.___", state 390, "(1)"
+       line 411, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 415, "(1)"
+       line 411, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 416, "else"
+       line 411, "pan.___", state 419, "(1)"
+       line 415, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 429, "(1)"
+       line 415, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 430, "else"
+       line 415, "pan.___", state 433, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 413, "pan.___", state 439, "((i<1))"
+       line 413, "pan.___", state 439, "((i>=1))"
+       line 420, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 447, "(1)"
+       line 420, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 448, "else"
+       line 420, "pan.___", state 451, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 424, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 461, "(1)"
+       line 424, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 462, "else"
+       line 424, "pan.___", state 465, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 422, "pan.___", state 471, "((i<2))"
+       line 422, "pan.___", state 471, "((i>=2))"
+       line 429, "pan.___", state 478, "(1)"
+       line 429, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 479, "else"
+       line 429, "pan.___", state 482, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 433, "pan.___", state 491, "(1)"
+       line 433, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 492, "else"
+       line 433, "pan.___", state 495, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 431, "pan.___", state 501, "((i<1))"
+       line 431, "pan.___", state 501, "((i>=1))"
+       line 438, "pan.___", state 508, "(1)"
+       line 438, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 509, "else"
+       line 438, "pan.___", state 512, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 442, "pan.___", state 521, "(1)"
+       line 442, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 522, "else"
+       line 442, "pan.___", state 525, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 440, "pan.___", state 531, "((i<2))"
+       line 440, "pan.___", state 531, "((i>=2))"
+       line 450, "pan.___", state 535, "(1)"
+       line 450, "pan.___", state 535, "(1)"
+       line 597, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 540, "(1)"
+       line 272, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 555, "(1)"
+       line 280, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 591, "(1)"
+       line 253, "pan.___", state 599, "(1)"
+       line 257, "pan.___", state 611, "(1)"
+       line 261, "pan.___", state 619, "(1)"
+       line 411, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 702, "(1)"
+       line 433, "pan.___", state 715, "(1)"
+       line 438, "pan.___", state 732, "(1)"
+       line 442, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 838, "(1)"
+       line 438, "pan.___", state 868, "(1)"
+       line 442, "pan.___", state 881, "(1)"
+       line 411, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 904, "(1)"
+       line 411, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 905, "else"
+       line 411, "pan.___", state 908, "(1)"
+       line 415, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 918, "(1)"
+       line 415, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 919, "else"
+       line 415, "pan.___", state 922, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 413, "pan.___", state 928, "((i<1))"
+       line 413, "pan.___", state 928, "((i>=1))"
+       line 420, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 936, "(1)"
+       line 420, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 937, "else"
+       line 420, "pan.___", state 940, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 424, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 950, "(1)"
+       line 424, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 951, "else"
+       line 424, "pan.___", state 954, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 422, "pan.___", state 960, "((i<2))"
+       line 422, "pan.___", state 960, "((i>=2))"
+       line 429, "pan.___", state 967, "(1)"
+       line 429, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 968, "else"
+       line 429, "pan.___", state 971, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 433, "pan.___", state 980, "(1)"
+       line 433, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 981, "else"
+       line 433, "pan.___", state 984, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 431, "pan.___", state 990, "((i<1))"
+       line 431, "pan.___", state 990, "((i>=1))"
+       line 438, "pan.___", state 997, "(1)"
+       line 438, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 998, "else"
+       line 438, "pan.___", state 1001, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 442, "pan.___", state 1010, "(1)"
+       line 442, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 1011, "else"
+       line 442, "pan.___", state 1014, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 440, "pan.___", state 1020, "((i<2))"
+       line 440, "pan.___", state 1020, "((i>=2))"
+       line 450, "pan.___", state 1024, "(1)"
+       line 450, "pan.___", state 1024, "(1)"
+       line 605, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1098, "(1)"
+       line 433, "pan.___", state 1111, "(1)"
+       line 438, "pan.___", state 1128, "(1)"
+       line 442, "pan.___", state 1141, "(1)"
+       line 411, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1230, "(1)"
+       line 438, "pan.___", state 1260, "(1)"
+       line 442, "pan.___", state 1273, "(1)"
+       line 411, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1363, "(1)"
+       line 438, "pan.___", state 1393, "(1)"
+       line 442, "pan.___", state 1406, "(1)"
+       line 411, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1492, "(1)"
+       line 438, "pan.___", state 1522, "(1)"
+       line 442, "pan.___", state 1535, "(1)"
+       line 272, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1605, "(1)"
+       line 253, "pan.___", state 1613, "(1)"
+       line 257, "pan.___", state 1625, "(1)"
+       line 261, "pan.___", state 1633, "(1)"
+       line 411, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1716, "(1)"
+       line 433, "pan.___", state 1729, "(1)"
+       line 438, "pan.___", state 1746, "(1)"
+       line 442, "pan.___", state 1759, "(1)"
+       line 411, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1845, "(1)"
+       line 433, "pan.___", state 1858, "(1)"
+       line 438, "pan.___", state 1875, "(1)"
+       line 442, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1977, "(1)"
+       line 438, "pan.___", state 2007, "(1)"
+       line 442, "pan.___", state 2020, "(1)"
+       line 644, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2113, "(1)"
+       line 438, "pan.___", state 2143, "(1)"
+       line 442, "pan.___", state 2156, "(1)"
+       line 411, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2242, "(1)"
+       line 438, "pan.___", state 2272, "(1)"
+       line 442, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2310, "(1)"
+       line 411, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2311, "else"
+       line 411, "pan.___", state 2314, "(1)"
+       line 415, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2324, "(1)"
+       line 415, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2325, "else"
+       line 415, "pan.___", state 2328, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 413, "pan.___", state 2334, "((i<1))"
+       line 413, "pan.___", state 2334, "((i>=1))"
+       line 420, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2342, "(1)"
+       line 420, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2343, "else"
+       line 420, "pan.___", state 2346, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 424, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2356, "(1)"
+       line 424, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2357, "else"
+       line 424, "pan.___", state 2360, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 422, "pan.___", state 2366, "((i<2))"
+       line 422, "pan.___", state 2366, "((i>=2))"
+       line 429, "pan.___", state 2373, "(1)"
+       line 429, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 2374, "else"
+       line 429, "pan.___", state 2377, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 433, "pan.___", state 2386, "(1)"
+       line 433, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 2387, "else"
+       line 433, "pan.___", state 2390, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 431, "pan.___", state 2396, "((i<1))"
+       line 431, "pan.___", state 2396, "((i>=1))"
+       line 438, "pan.___", state 2403, "(1)"
+       line 438, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 2404, "else"
+       line 438, "pan.___", state 2407, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 442, "pan.___", state 2416, "(1)"
+       line 442, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 2417, "else"
+       line 442, "pan.___", state 2420, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 440, "pan.___", state 2426, "((i<2))"
+       line 440, "pan.___", state 2426, "((i>=2))"
+       line 450, "pan.___", state 2430, "(1)"
+       line 450, "pan.___", state 2430, "(1)"
+       line 644, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2435, "(1)"
+       line 272, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2486, "(1)"
+       line 253, "pan.___", state 2494, "(1)"
+       line 257, "pan.___", state 2506, "(1)"
+       line 261, "pan.___", state 2514, "(1)"
+       line 411, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2597, "(1)"
+       line 433, "pan.___", state 2610, "(1)"
+       line 438, "pan.___", state 2627, "(1)"
+       line 442, "pan.___", state 2640, "(1)"
+       line 272, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2711, "(1)"
+       line 253, "pan.___", state 2719, "(1)"
+       line 257, "pan.___", state 2731, "(1)"
+       line 261, "pan.___", state 2739, "(1)"
+       line 411, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2822, "(1)"
+       line 433, "pan.___", state 2835, "(1)"
+       line 438, "pan.___", state 2852, "(1)"
+       line 442, "pan.___", state 2865, "(1)"
+       line 411, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2951, "(1)"
+       line 433, "pan.___", state 2964, "(1)"
+       line 438, "pan.___", state 2981, "(1)"
+       line 442, "pan.___", state 2994, "(1)"
+       line 411, "pan.___", state 3027, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 3059, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 3073, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 3092, "(1)"
+       line 438, "pan.___", state 3122, "(1)"
+       line 442, "pan.___", state 3135, "(1)"
+       line 411, "pan.___", state 3154, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 3168, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 3186, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 3200, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 3219, "(1)"
+       line 433, "pan.___", state 3232, "(1)"
+       line 438, "pan.___", state 3249, "(1)"
+       line 442, "pan.___", state 3262, "(1)"
+       line 898, "pan.___", state 3283, "-end-"
+       (290 of 3283 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 24, "(1)"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 38, "(1)"
+       line 415, "pan.___", state 39, "(1)"
+       line 415, "pan.___", state 39, "(1)"
+       line 413, "pan.___", state 44, "((i<1))"
+       line 413, "pan.___", state 44, "((i>=1))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 56, "(1)"
+       line 420, "pan.___", state 57, "(1)"
+       line 420, "pan.___", state 57, "(1)"
+       line 424, "pan.___", state 70, "(1)"
+       line 424, "pan.___", state 71, "(1)"
+       line 424, "pan.___", state 71, "(1)"
+       line 422, "pan.___", state 76, "((i<2))"
+       line 422, "pan.___", state 76, "((i>=2))"
+       line 429, "pan.___", state 83, "(1)"
+       line 429, "pan.___", state 84, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 84, "else"
+       line 429, "pan.___", state 87, "(1)"
+       line 429, "pan.___", state 88, "(1)"
+       line 429, "pan.___", state 88, "(1)"
+       line 433, "pan.___", state 96, "(1)"
+       line 433, "pan.___", state 97, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 97, "else"
+       line 433, "pan.___", state 100, "(1)"
+       line 433, "pan.___", state 101, "(1)"
+       line 433, "pan.___", state 101, "(1)"
+       line 431, "pan.___", state 106, "((i<1))"
+       line 431, "pan.___", state 106, "((i>=1))"
+       line 438, "pan.___", state 113, "(1)"
+       line 438, "pan.___", state 114, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 114, "else"
+       line 438, "pan.___", state 117, "(1)"
+       line 438, "pan.___", state 118, "(1)"
+       line 438, "pan.___", state 118, "(1)"
+       line 442, "pan.___", state 126, "(1)"
+       line 442, "pan.___", state 127, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 127, "else"
+       line 442, "pan.___", state 130, "(1)"
+       line 442, "pan.___", state 131, "(1)"
+       line 442, "pan.___", state 131, "(1)"
+       line 440, "pan.___", state 136, "((i<2))"
+       line 440, "pan.___", state 136, "((i>=2))"
+       line 450, "pan.___", state 140, "(1)"
+       line 450, "pan.___", state 140, "(1)"
+       line 272, "pan.___", state 149, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 158, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 211, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 225, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 243, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 257, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 276, "(1)"
+       line 433, "pan.___", state 289, "(1)"
+       line 438, "pan.___", state 306, "(1)"
+       line 442, "pan.___", state 319, "(1)"
+       line 415, "pan.___", state 356, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 374, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 388, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 420, "(1)"
+       line 438, "pan.___", state 437, "(1)"
+       line 442, "pan.___", state 450, "(1)"
+       line 415, "pan.___", state 495, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 513, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 559, "(1)"
+       line 438, "pan.___", state 576, "(1)"
+       line 442, "pan.___", state 589, "(1)"
+       line 415, "pan.___", state 624, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 642, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 656, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 688, "(1)"
+       line 438, "pan.___", state 705, "(1)"
+       line 442, "pan.___", state 718, "(1)"
+       line 415, "pan.___", state 755, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 773, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 787, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 819, "(1)"
+       line 438, "pan.___", state 836, "(1)"
+       line 442, "pan.___", state 849, "(1)"
+       line 272, "pan.___", state 904, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 913, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 928, "(1)"
+       line 284, "pan.___", state 935, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 951, "(1)"
+       line 253, "pan.___", state 959, "(1)"
+       line 257, "pan.___", state 971, "(1)"
+       line 261, "pan.___", state 979, "(1)"
+       line 276, "pan.___", state 1004, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1017, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1026, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1042, "(1)"
+       line 253, "pan.___", state 1050, "(1)"
+       line 257, "pan.___", state 1062, "(1)"
+       line 261, "pan.___", state 1070, "(1)"
+       line 276, "pan.___", state 1095, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1108, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1117, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1133, "(1)"
+       line 253, "pan.___", state 1141, "(1)"
+       line 257, "pan.___", state 1153, "(1)"
+       line 261, "pan.___", state 1161, "(1)"
+       line 276, "pan.___", state 1186, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1199, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1208, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1224, "(1)"
+       line 253, "pan.___", state 1232, "(1)"
+       line 257, "pan.___", state 1244, "(1)"
+       line 261, "pan.___", state 1252, "(1)"
+       line 1237, "pan.___", state 1267, "-end-"
+       (96 of 1267 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 474 seconds
+pan: rate 1298.3425 states/second
+pan: avg transition delay 1.235e-06 usec
+cp .input.spin urcu_free_no_rmb.spin.input
+cp .input.spin.trail urcu_free_no_rmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.spin.input
new file mode 100644 (file)
index 0000000..8cb5f81
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_RMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
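+
+/*
+ * Note: the low-order bits (RCU_GP_CTR_NEST_MASK) count read-side critical
+ * section nesting, while RCU_GP_CTR_BIT is the grace-period phase bit flipped
+ * by the writer; with RCU_GP_CTR_BIT = 1 << 7, nesting counts up to 127 fit
+ * below the phase bit.
+ */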
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
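+
+/*
+ * For example, the reader's first memory barrier below is guarded by
+ *     CONSUME_TOKENS(proc_urcu_reader, READ_LOCK_OUT, READ_PROC_FIRST_MB)
+ * so it only becomes eligible once READ_LOCK_OUT has been produced, stays
+ * disabled once READ_PROC_FIRST_MB is set, and, by producing
+ * READ_PROC_FIRST_MB after executing, can fire at most once per iteration.
+ */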
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
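+
+/*
+ * Small example of the dependency kinds above:
+ *     a = x;          (1)
+ *     x = b;          (2) WAR with (1): may not clobber x before (1) reads it
+ *     x = c;          (3) WAW with (2): the final value of x must be c
+ *     d = x;          (4) RAW with (3): must read the value written by (3)
+ */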
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. See
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
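+
+/*
+ * In other words, WRITE_CACHED_VAR only updates the writing process' cached
+ * copy and sets its dirty bit; the value reaches mem_##x when
+ * CACHE_WRITE_TO_MEM is issued for that process, either by smp_wmb/smp_mb or
+ * nondeterministically by RANDOM_CACHE_WRITE_TO_MEM in ooo_mem(). Until then,
+ * other processes keep reading their own, possibly stale, cached copy through
+ * READ_CACHED_VAR.
+ */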
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
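+
+/*
+ * Summary: with REMOTE_BARRIERS, the reader's own barriers (smp_mb_reader)
+ * are no-ops and smp_mb_recv services the writer's barrier requests instead;
+ * without it, the reader issues smp_mb directly, smp_mb_recv is a no-op, and
+ * smp_mb_send reduces to a plain smp_mb.
+ */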
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
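+/*
+ * ooo_mem models out-of-order memory: between instructions, dirty cache
+ * entries may or may not be flushed to memory (RANDOM_CACHE_WRITE_TO_MEM)
+ * and, on Alpha (HAVE_OOO_CACHE_READ), clean entries may or may not be
+ * refreshed from memory (RANDOM_CACHE_READ_FROM_MEM); on other architectures
+ * the reads are refreshed in order through smp_rmb.
+ */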
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
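+
+/*
+ * The macro above corresponds roughly to the following C pseudo-code for the
+ * read-side lock, broken into token-guarded steps so the verifier may reorder
+ * them according to the declared dependencies:
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;  // outermost: snapshot
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;      // nested: increment
+ */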
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
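+
+/*
+ * Token bit layout for _proc_urcu_reader (bits 0 to 29, hence the
+ * READ_PROC_ALL_TOKENS_CLEAR mask of (1 << 30) - 1):
+ *     bit  0      start token (READ_PROD_NONE)
+ *     bits 1-5    outermost read lock (READ_LOCK_OUT)
+ *     bit  6      first mb
+ *     bits 7-11   nested read lock (READ_LOCK_NESTED_OUT)
+ *     bits 12-13  first pointer read and dereference
+ *     bits 14-15  nested read unlock
+ *     bit  16     second mb
+ *     bits 17-18  outermost read unlock
+ *     bits 19-23  unrolled second read lock
+ *     bit  24     third mb
+ *     bits 25-26  second pointer read and dereference
+ *     bit  27     fourth mb
+ *     bits 28-29  unrolled read unlock
+ */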
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution into the other's.
+        */
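+       /*
+        * Illustrative sketch (an assumed reordering, for reading only, not
+        * part of the model) : without the mb()s, nothing orders the next
+        * iteration's generation pointer read
+        *     ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+        * with respect to the previous iteration's reader count accesses, so
+        * that read may effectively execute inside the previous read-side
+        * critical section, i.e. the two loop executions can overlap.
+        */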
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
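+
+/*
+ * Informal sketch of how these token bits drive the writer below (the actual
+ * CONSUME_TOKENS/PRODUCE_TOKENS/CLEAR_TOKENS macros are defined earlier in
+ * this model; this block is only a reading aid). Each writer instruction is
+ * guarded as:
+ *
+ *     :: CONSUME_TOKENS(proc_urcu_writer, dependencies, produced) ->
+ *             ... side-effect of the instruction ...
+ *             PRODUCE_TOKENS(proc_urcu_writer, produced);
+ *
+ * The guard is enabled once all "dependencies" bits are set in
+ * _proc_urcu_writer while the "produced" bit is still clear, so the model
+ * checker explores every instruction interleaving permitted by those
+ * dependencies.
+ */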
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep local track of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
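+                       /*
+                        * The atomic block above is the model's stand-in for
+                        * the C-level pointer exchange, roughly (assumed
+                        * signature, shown for illustration only):
+                        *     old_data = rcu_xchg_pointer(&rcu_ptr, cur_data);
+                        * i.e. the old rcu_ptr value is returned and the new
+                        * data slot is installed in a single indivisible step.
+                        */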
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
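+                       /*
+                        * Reading aid (informal) : the writer keeps looping
+                        * (FIRST_WAIT_LOOP) while reader 0 is inside a
+                        * read-side critical section (non-zero nest count in
+                        * tmp2) whose parity bit differs from cur_gp_val,
+                        * i.e. a critical section started before the flip.
+                        * For example, a reader snapshotted with the old
+                        * (even) parity makes (tmp2 ^ cur_gp_val) &
+                        * RCU_GP_CTR_BIT non-zero; once it exits, or
+                        * re-enters after the flip, the else branch produces
+                        * WRITE_PROC_FIRST_WAIT.
+                        */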
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep init after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_rmb.spin.input.trail
new file mode 100644 (file)
index 0000000..de03f6a
--- /dev/null
@@ -0,0 +1,2095 @@
+-2:3:-2
+-4:-4:-4
+1:0:4630
+2:3:4550
+3:3:4553
+4:3:4553
+5:3:4556
+6:3:4564
+7:3:4564
+8:3:4567
+9:3:4573
+10:3:4577
+11:3:4577
+12:3:4580
+13:3:4590
+14:3:4598
+15:3:4598
+16:3:4601
+17:3:4607
+18:3:4611
+19:3:4611
+20:3:4614
+21:3:4620
+22:3:4624
+23:3:4625
+24:0:4630
+25:3:4627
+26:0:4630
+27:2:3285
+28:0:4630
+29:2:3291
+30:0:4630
+31:2:3292
+32:0:4630
+33:2:3294
+34:0:4630
+35:2:3295
+36:0:4630
+37:2:3296
+38:2:3297
+39:2:3301
+40:2:3302
+41:2:3310
+42:2:3311
+43:2:3315
+44:2:3316
+45:2:3324
+46:2:3329
+47:2:3333
+48:2:3334
+49:2:3342
+50:2:3343
+51:2:3347
+52:2:3348
+53:2:3342
+54:2:3343
+55:2:3347
+56:2:3348
+57:2:3356
+58:2:3361
+59:2:3362
+60:2:3373
+61:2:3374
+62:2:3375
+63:2:3386
+64:2:3391
+65:2:3392
+66:2:3403
+67:2:3404
+68:2:3405
+69:2:3403
+70:2:3404
+71:2:3405
+72:2:3416
+73:2:3424
+74:0:4630
+75:2:3295
+76:0:4630
+77:2:3428
+78:2:3432
+79:2:3433
+80:2:3437
+81:2:3441
+82:2:3442
+83:2:3446
+84:2:3454
+85:2:3455
+86:2:3459
+87:2:3463
+88:2:3464
+89:2:3459
+90:2:3460
+91:2:3468
+92:0:4630
+93:2:3295
+94:0:4630
+95:2:3476
+96:2:3477
+97:2:3478
+98:0:4630
+99:2:3295
+100:0:4630
+101:2:3483
+102:0:4630
+103:2:4187
+104:2:4188
+105:2:4192
+106:2:4196
+107:2:4197
+108:2:4201
+109:2:4206
+110:2:4214
+111:2:4218
+112:2:4219
+113:2:4214
+114:2:4218
+115:2:4219
+116:2:4223
+117:2:4230
+118:2:4237
+119:2:4238
+120:2:4245
+121:2:4250
+122:2:4257
+123:2:4258
+124:2:4257
+125:2:4258
+126:2:4265
+127:2:4269
+128:0:4630
+129:2:3485
+130:2:4168
+131:0:4630
+132:2:3295
+133:0:4630
+134:2:3486
+135:0:4630
+136:2:3295
+137:0:4630
+138:2:3489
+139:2:3490
+140:2:3494
+141:2:3495
+142:2:3503
+143:2:3504
+144:2:3508
+145:2:3509
+146:2:3517
+147:2:3522
+148:2:3526
+149:2:3527
+150:2:3535
+151:2:3536
+152:2:3540
+153:2:3541
+154:2:3535
+155:2:3536
+156:2:3540
+157:2:3541
+158:2:3549
+159:2:3554
+160:2:3555
+161:2:3566
+162:2:3567
+163:2:3568
+164:2:3579
+165:2:3584
+166:2:3585
+167:2:3596
+168:2:3597
+169:2:3598
+170:2:3596
+171:2:3597
+172:2:3598
+173:2:3609
+174:2:3616
+175:0:4630
+176:2:3295
+177:0:4630
+178:2:3620
+179:2:3621
+180:2:3622
+181:2:3634
+182:2:3635
+183:2:3639
+184:2:3640
+185:2:3648
+186:2:3653
+187:2:3657
+188:2:3658
+189:2:3666
+190:2:3667
+191:2:3671
+192:2:3672
+193:2:3666
+194:2:3667
+195:2:3671
+196:2:3672
+197:2:3680
+198:2:3685
+199:2:3686
+200:2:3697
+201:2:3698
+202:2:3699
+203:2:3710
+204:2:3715
+205:2:3716
+206:2:3727
+207:2:3728
+208:2:3729
+209:2:3727
+210:2:3728
+211:2:3729
+212:2:3740
+213:2:3751
+214:2:3752
+215:0:4630
+216:2:3295
+217:0:4630
+218:2:3759
+219:2:3760
+220:2:3764
+221:2:3765
+222:2:3773
+223:2:3774
+224:2:3778
+225:2:3779
+226:2:3787
+227:2:3792
+228:2:3796
+229:2:3797
+230:2:3805
+231:2:3806
+232:2:3810
+233:2:3811
+234:2:3805
+235:2:3806
+236:2:3810
+237:2:3811
+238:2:3819
+239:2:3824
+240:2:3825
+241:2:3836
+242:2:3837
+243:2:3838
+244:2:3849
+245:2:3854
+246:2:3855
+247:2:3866
+248:2:3867
+249:2:3868
+250:2:3866
+251:2:3867
+252:2:3868
+253:2:3879
+254:0:4630
+255:2:3295
+256:0:4630
+257:2:3888
+258:2:3889
+259:2:3893
+260:2:3894
+261:2:3902
+262:2:3903
+263:2:3907
+264:2:3908
+265:2:3916
+266:2:3921
+267:2:3925
+268:2:3926
+269:2:3934
+270:2:3935
+271:2:3939
+272:2:3940
+273:2:3934
+274:2:3935
+275:2:3939
+276:2:3940
+277:2:3948
+278:2:3953
+279:2:3954
+280:2:3965
+281:2:3966
+282:2:3967
+283:2:3978
+284:2:3983
+285:2:3984
+286:2:3995
+287:2:3996
+288:2:3997
+289:2:3995
+290:2:3996
+291:2:3997
+292:2:4008
+293:2:4015
+294:0:4630
+295:2:3295
+296:0:4630
+297:2:4019
+298:2:4020
+299:2:4021
+300:2:4033
+301:2:4034
+302:2:4038
+303:2:4039
+304:2:4047
+305:2:4052
+306:2:4056
+307:2:4057
+308:2:4065
+309:2:4066
+310:2:4070
+311:2:4071
+312:2:4065
+313:2:4066
+314:2:4070
+315:2:4071
+316:2:4079
+317:2:4084
+318:2:4085
+319:2:4096
+320:2:4097
+321:2:4098
+322:2:4109
+323:2:4114
+324:2:4115
+325:2:4126
+326:2:4127
+327:2:4128
+328:2:4126
+329:2:4127
+330:2:4128
+331:2:4139
+332:2:4149
+333:2:4150
+334:0:4630
+335:2:3295
+336:0:4630
+337:2:4156
+338:0:4630
+339:2:4460
+340:2:4461
+341:2:4465
+342:2:4469
+343:2:4470
+344:2:4474
+345:2:4482
+346:2:4483
+347:2:4487
+348:2:4491
+349:2:4492
+350:2:4487
+351:2:4491
+352:2:4492
+353:2:4496
+354:2:4503
+355:2:4510
+356:2:4511
+357:2:4518
+358:2:4523
+359:2:4530
+360:2:4531
+361:2:4530
+362:2:4531
+363:2:4538
+364:2:4542
+365:0:4630
+366:2:4158
+367:2:4168
+368:0:4630
+369:2:3295
+370:0:4630
+371:2:4159
+372:2:4160
+373:0:4630
+374:2:3295
+375:0:4630
+376:2:4164
+377:0:4630
+378:2:4172
+379:0:4630
+380:2:3292
+381:0:4630
+382:2:3294
+383:0:4630
+384:2:3295
+385:0:4630
+386:2:3296
+387:2:3297
+388:2:3301
+389:2:3302
+390:2:3310
+391:2:3311
+392:2:3315
+393:2:3316
+394:2:3324
+395:2:3329
+396:2:3333
+397:2:3334
+398:2:3342
+399:2:3343
+400:2:3344
+401:2:3342
+402:2:3343
+403:2:3347
+404:2:3348
+405:2:3356
+406:2:3361
+407:2:3362
+408:2:3373
+409:2:3374
+410:2:3375
+411:2:3386
+412:2:3391
+413:2:3392
+414:2:3403
+415:2:3404
+416:2:3405
+417:2:3403
+418:2:3404
+419:2:3405
+420:2:3416
+421:2:3424
+422:0:4630
+423:2:3295
+424:0:4630
+425:2:3428
+426:2:3432
+427:2:3433
+428:2:3437
+429:2:3441
+430:2:3442
+431:2:3446
+432:2:3454
+433:2:3455
+434:2:3459
+435:2:3460
+436:2:3459
+437:2:3463
+438:2:3464
+439:2:3468
+440:0:4630
+441:2:3295
+442:0:4630
+443:2:3476
+444:2:3477
+445:2:3478
+446:0:4630
+447:2:3295
+448:0:4630
+449:2:3483
+450:0:4630
+451:2:4187
+452:2:4188
+453:2:4192
+454:2:4196
+455:2:4197
+456:2:4201
+457:2:4206
+458:2:4214
+459:2:4218
+460:2:4219
+461:2:4214
+462:2:4218
+463:2:4219
+464:2:4223
+465:2:4230
+466:2:4237
+467:2:4238
+468:2:4245
+469:2:4250
+470:2:4257
+471:2:4258
+472:2:4257
+473:2:4258
+474:2:4265
+475:2:4269
+476:0:4630
+477:2:3485
+478:2:4168
+479:0:4630
+480:2:3295
+481:0:4630
+482:2:3486
+483:0:4630
+484:2:3295
+485:0:4630
+486:2:3489
+487:2:3490
+488:2:3494
+489:2:3495
+490:2:3503
+491:2:3504
+492:2:3508
+493:2:3509
+494:2:3517
+495:2:3522
+496:2:3526
+497:2:3527
+498:2:3535
+499:2:3536
+500:2:3540
+501:2:3541
+502:2:3535
+503:2:3536
+504:2:3540
+505:2:3541
+506:2:3549
+507:2:3554
+508:2:3555
+509:2:3566
+510:2:3567
+511:2:3568
+512:2:3579
+513:2:3584
+514:2:3585
+515:2:3596
+516:2:3597
+517:2:3598
+518:2:3596
+519:2:3597
+520:2:3598
+521:2:3609
+522:2:3616
+523:0:4630
+524:2:3295
+525:0:4630
+526:2:3620
+527:2:3621
+528:2:3622
+529:2:3634
+530:2:3635
+531:2:3639
+532:2:3640
+533:2:3648
+534:2:3653
+535:2:3657
+536:2:3658
+537:2:3666
+538:2:3667
+539:2:3671
+540:2:3672
+541:2:3666
+542:2:3667
+543:2:3671
+544:2:3672
+545:2:3680
+546:2:3685
+547:2:3686
+548:2:3697
+549:2:3698
+550:2:3699
+551:2:3710
+552:2:3715
+553:2:3716
+554:2:3727
+555:2:3728
+556:2:3729
+557:2:3727
+558:2:3728
+559:2:3729
+560:2:3740
+561:2:3751
+562:2:3752
+563:0:4630
+564:2:3295
+565:0:4630
+566:2:3759
+567:2:3760
+568:2:3764
+569:2:3765
+570:2:3773
+571:2:3774
+572:2:3778
+573:2:3779
+574:2:3787
+575:2:3792
+576:2:3796
+577:2:3797
+578:2:3805
+579:2:3806
+580:2:3810
+581:2:3811
+582:2:3805
+583:2:3806
+584:2:3810
+585:2:3811
+586:2:3819
+587:2:3824
+588:2:3825
+589:2:3836
+590:2:3837
+591:2:3838
+592:2:3849
+593:2:3854
+594:2:3855
+595:2:3866
+596:2:3867
+597:2:3868
+598:2:3866
+599:2:3867
+600:2:3868
+601:2:3879
+602:0:4630
+603:2:3295
+604:0:4630
+605:2:3888
+606:2:3889
+607:2:3893
+608:2:3894
+609:2:3902
+610:2:3903
+611:2:3907
+612:2:3908
+613:2:3916
+614:2:3921
+615:2:3925
+616:2:3926
+617:2:3934
+618:2:3935
+619:2:3939
+620:2:3940
+621:2:3934
+622:2:3935
+623:2:3939
+624:2:3940
+625:2:3948
+626:2:3953
+627:2:3954
+628:2:3965
+629:2:3966
+630:2:3967
+631:2:3978
+632:2:3983
+633:2:3984
+634:2:3995
+635:2:3996
+636:2:3997
+637:2:3995
+638:2:3996
+639:2:3997
+640:2:4008
+641:2:4015
+642:0:4630
+643:2:3295
+644:0:4630
+645:2:4019
+646:2:4020
+647:2:4021
+648:2:4033
+649:2:4034
+650:2:4038
+651:2:4039
+652:2:4047
+653:2:4052
+654:2:4056
+655:2:4057
+656:2:4065
+657:2:4066
+658:2:4070
+659:2:4071
+660:2:4065
+661:2:4066
+662:2:4070
+663:2:4071
+664:2:4079
+665:2:4084
+666:2:4085
+667:2:4096
+668:2:4097
+669:2:4098
+670:2:4109
+671:2:4114
+672:2:4115
+673:2:4126
+674:2:4127
+675:2:4128
+676:2:4126
+677:2:4127
+678:2:4128
+679:2:4139
+680:2:4149
+681:2:4150
+682:0:4630
+683:2:3295
+684:0:4630
+685:2:4156
+686:0:4630
+687:2:4460
+688:2:4461
+689:2:4465
+690:2:4469
+691:2:4470
+692:2:4474
+693:2:4482
+694:2:4483
+695:2:4487
+696:2:4491
+697:2:4492
+698:2:4487
+699:2:4491
+700:2:4492
+701:2:4496
+702:2:4503
+703:2:4510
+704:2:4511
+705:2:4518
+706:2:4523
+707:2:4530
+708:2:4531
+709:2:4530
+710:2:4531
+711:2:4538
+712:2:4542
+713:0:4630
+714:2:4158
+715:2:4168
+716:0:4630
+717:2:3295
+718:0:4630
+719:2:4159
+720:2:4160
+721:0:4630
+722:2:3295
+723:0:4630
+724:2:4164
+725:0:4630
+726:2:4172
+727:0:4630
+728:2:3292
+729:0:4630
+730:2:3294
+731:0:4630
+732:2:3295
+733:0:4630
+734:2:3296
+735:2:3297
+736:2:3301
+737:2:3302
+738:2:3310
+739:2:3311
+740:2:3315
+741:2:3316
+742:2:3324
+743:2:3329
+744:2:3333
+745:2:3334
+746:2:3342
+747:2:3343
+748:2:3347
+749:2:3348
+750:2:3342
+751:2:3343
+752:2:3344
+753:2:3356
+754:2:3361
+755:2:3362
+756:2:3373
+757:2:3374
+758:2:3375
+759:2:3386
+760:2:3391
+761:2:3392
+762:2:3403
+763:2:3404
+764:2:3405
+765:2:3403
+766:2:3404
+767:2:3405
+768:2:3416
+769:2:3424
+770:0:4630
+771:2:3295
+772:0:4630
+773:1:2
+774:0:4630
+775:1:8
+776:0:4630
+777:1:9
+778:0:4630
+779:1:10
+780:0:4630
+781:1:11
+782:0:4630
+783:1:12
+784:1:13
+785:1:17
+786:1:18
+787:1:26
+788:1:27
+789:1:31
+790:1:32
+791:1:40
+792:1:45
+793:1:49
+794:1:50
+795:1:58
+796:1:59
+797:1:63
+798:1:64
+799:1:58
+800:1:59
+801:1:63
+802:1:64
+803:1:72
+804:1:77
+805:1:78
+806:1:89
+807:1:90
+808:1:91
+809:1:102
+810:1:107
+811:1:108
+812:1:119
+813:1:120
+814:1:121
+815:1:119
+816:1:120
+817:1:121
+818:1:132
+819:0:4630
+820:1:11
+821:0:4630
+822:1:141
+823:1:142
+824:0:4630
+825:1:11
+826:0:4630
+827:1:148
+828:1:149
+829:1:153
+830:1:154
+831:1:162
+832:1:163
+833:1:167
+834:1:168
+835:1:176
+836:1:181
+837:1:185
+838:1:186
+839:1:194
+840:1:195
+841:1:199
+842:1:200
+843:1:194
+844:1:195
+845:1:199
+846:1:200
+847:1:208
+848:1:213
+849:1:214
+850:1:225
+851:1:226
+852:1:227
+853:1:238
+854:1:243
+855:1:244
+856:1:255
+857:1:256
+858:1:257
+859:1:255
+860:1:256
+861:1:257
+862:1:268
+863:0:4630
+864:1:11
+865:0:4630
+866:1:277
+867:1:278
+868:1:282
+869:1:283
+870:1:291
+871:1:292
+872:1:296
+873:1:297
+874:1:305
+875:1:310
+876:1:314
+877:1:315
+878:1:323
+879:1:324
+880:1:328
+881:1:329
+882:1:323
+883:1:324
+884:1:328
+885:1:329
+886:1:337
+887:1:342
+888:1:343
+889:1:354
+890:1:355
+891:1:356
+892:1:367
+893:1:372
+894:1:373
+895:1:384
+896:1:385
+897:1:386
+898:1:384
+899:1:385
+900:1:386
+901:1:397
+902:1:404
+903:0:4630
+904:1:11
+905:0:4630
+906:1:540
+907:1:544
+908:1:545
+909:1:549
+910:1:550
+911:1:558
+912:1:566
+913:1:567
+914:1:571
+915:1:575
+916:1:576
+917:1:571
+918:1:575
+919:1:576
+920:1:580
+921:1:587
+922:1:594
+923:1:595
+924:1:602
+925:1:607
+926:1:614
+927:1:615
+928:1:614
+929:1:615
+930:1:622
+931:0:4630
+932:1:11
+933:0:4630
+934:2:3428
+935:2:3432
+936:2:3433
+937:2:3437
+938:2:3441
+939:2:3442
+940:2:3446
+941:2:3454
+942:2:3455
+943:2:3459
+944:2:3463
+945:2:3464
+946:2:3459
+947:2:3460
+948:2:3468
+949:0:4630
+950:2:3295
+951:0:4630
+952:2:3476
+953:2:3477
+954:2:3478
+955:0:4630
+956:2:3295
+957:0:4630
+958:2:3483
+959:0:4630
+960:2:4187
+961:2:4188
+962:2:4192
+963:2:4196
+964:2:4197
+965:2:4201
+966:2:4206
+967:2:4214
+968:2:4218
+969:2:4219
+970:2:4214
+971:2:4218
+972:2:4219
+973:2:4223
+974:2:4230
+975:2:4237
+976:2:4238
+977:2:4245
+978:2:4250
+979:2:4257
+980:2:4258
+981:2:4257
+982:2:4258
+983:2:4265
+984:2:4269
+985:0:4630
+986:2:3485
+987:2:4168
+988:0:4630
+989:2:3295
+990:0:4630
+991:2:3486
+992:0:4630
+993:2:3295
+994:0:4630
+995:2:3489
+996:2:3490
+997:2:3494
+998:2:3495
+999:2:3503
+1000:2:3504
+1001:2:3508
+1002:2:3509
+1003:2:3517
+1004:2:3522
+1005:2:3526
+1006:2:3527
+1007:2:3535
+1008:2:3536
+1009:2:3540
+1010:2:3541
+1011:2:3535
+1012:2:3536
+1013:2:3540
+1014:2:3541
+1015:2:3549
+1016:2:3554
+1017:2:3555
+1018:2:3566
+1019:2:3567
+1020:2:3568
+1021:2:3579
+1022:2:3584
+1023:2:3585
+1024:2:3596
+1025:2:3597
+1026:2:3598
+1027:2:3596
+1028:2:3597
+1029:2:3598
+1030:2:3609
+1031:2:3616
+1032:0:4630
+1033:2:3295
+1034:0:4630
+1035:2:3620
+1036:2:3621
+1037:2:3622
+1038:2:3634
+1039:2:3635
+1040:2:3639
+1041:2:3640
+1042:2:3648
+1043:2:3653
+1044:2:3657
+1045:2:3658
+1046:2:3666
+1047:2:3667
+1048:2:3671
+1049:2:3672
+1050:2:3666
+1051:2:3667
+1052:2:3671
+1053:2:3672
+1054:2:3680
+1055:2:3685
+1056:2:3686
+1057:2:3697
+1058:2:3698
+1059:2:3699
+1060:2:3710
+1061:2:3715
+1062:2:3716
+1063:2:3727
+1064:2:3728
+1065:2:3729
+1066:2:3727
+1067:2:3728
+1068:2:3729
+1069:2:3740
+1070:2:3749
+1071:0:4630
+1072:2:3295
+1073:0:4630
+1074:2:3755
+1075:0:4630
+1076:2:4278
+1077:2:4279
+1078:2:4283
+1079:2:4287
+1080:2:4288
+1081:2:4292
+1082:2:4300
+1083:2:4301
+1084:2:4305
+1085:2:4309
+1086:2:4310
+1087:2:4305
+1088:2:4309
+1089:2:4310
+1090:2:4314
+1091:2:4321
+1092:2:4328
+1093:2:4329
+1094:2:4336
+1095:2:4341
+1096:2:4348
+1097:2:4349
+1098:2:4348
+1099:2:4349
+1100:2:4356
+1101:2:4360
+1102:0:4630
+1103:2:3757
+1104:2:3758
+1105:0:4630
+1106:2:3295
+1107:0:4630
+1108:2:3759
+1109:2:3760
+1110:2:3764
+1111:2:3765
+1112:2:3773
+1113:2:3774
+1114:2:3778
+1115:2:3779
+1116:2:3787
+1117:2:3792
+1118:2:3796
+1119:2:3797
+1120:2:3805
+1121:2:3806
+1122:2:3810
+1123:2:3811
+1124:2:3805
+1125:2:3806
+1126:2:3810
+1127:2:3811
+1128:2:3819
+1129:2:3824
+1130:2:3825
+1131:2:3836
+1132:2:3837
+1133:2:3838
+1134:2:3849
+1135:2:3854
+1136:2:3855
+1137:2:3866
+1138:2:3867
+1139:2:3868
+1140:2:3866
+1141:2:3867
+1142:2:3868
+1143:2:3879
+1144:0:4630
+1145:2:3295
+1146:0:4630
+1147:2:3620
+1148:2:3621
+1149:2:3625
+1150:2:3626
+1151:2:3634
+1152:2:3635
+1153:2:3639
+1154:2:3640
+1155:2:3648
+1156:2:3653
+1157:2:3657
+1158:2:3658
+1159:2:3666
+1160:2:3667
+1161:2:3671
+1162:2:3672
+1163:2:3666
+1164:2:3667
+1165:2:3671
+1166:2:3672
+1167:2:3680
+1168:2:3685
+1169:2:3686
+1170:2:3697
+1171:2:3698
+1172:2:3699
+1173:2:3710
+1174:2:3715
+1175:2:3716
+1176:2:3727
+1177:2:3728
+1178:2:3729
+1179:2:3727
+1180:2:3728
+1181:2:3729
+1182:2:3740
+1183:2:3749
+1184:0:4630
+1185:2:3295
+1186:0:4630
+1187:2:3755
+1188:0:4630
+1189:2:4278
+1190:2:4279
+1191:2:4283
+1192:2:4287
+1193:2:4288
+1194:2:4292
+1195:2:4300
+1196:2:4301
+1197:2:4305
+1198:2:4309
+1199:2:4310
+1200:2:4305
+1201:2:4309
+1202:2:4310
+1203:2:4314
+1204:2:4321
+1205:2:4328
+1206:2:4329
+1207:2:4336
+1208:2:4341
+1209:2:4348
+1210:2:4349
+1211:2:4348
+1212:2:4349
+1213:2:4356
+1214:2:4360
+1215:0:4630
+1216:2:3757
+1217:2:3758
+1218:0:4630
+1219:2:3295
+1220:0:4630
+1221:2:3620
+1222:2:3621
+1223:2:3625
+1224:2:3626
+1225:2:3634
+1226:2:3635
+1227:2:3639
+1228:2:3640
+1229:2:3648
+1230:2:3653
+1231:2:3657
+1232:2:3658
+1233:2:3666
+1234:2:3667
+1235:2:3671
+1236:2:3672
+1237:2:3666
+1238:2:3667
+1239:2:3671
+1240:2:3672
+1241:2:3680
+1242:2:3685
+1243:2:3686
+1244:2:3697
+1245:2:3698
+1246:2:3699
+1247:2:3710
+1248:2:3715
+1249:2:3716
+1250:2:3727
+1251:2:3728
+1252:2:3729
+1253:2:3727
+1254:2:3728
+1255:2:3729
+1256:2:3740
+1257:2:3749
+1258:0:4630
+1259:2:3295
+1260:0:4630
+1261:2:3755
+1262:0:4630
+1263:2:4278
+1264:2:4279
+1265:2:4283
+1266:2:4287
+1267:2:4288
+1268:2:4292
+1269:2:4300
+1270:2:4301
+1271:2:4305
+1272:2:4309
+1273:2:4310
+1274:2:4305
+1275:2:4309
+1276:2:4310
+1277:2:4314
+1278:2:4321
+1279:2:4328
+1280:2:4329
+1281:2:4336
+1282:2:4341
+1283:2:4348
+1284:2:4349
+1285:2:4348
+1286:2:4349
+1287:2:4356
+1288:2:4360
+1289:0:4630
+1290:1:632
+1291:1:633
+1292:1:637
+1293:1:638
+1294:1:646
+1295:1:647
+1296:1:651
+1297:1:652
+1298:1:660
+1299:1:665
+1300:1:669
+1301:1:670
+1302:1:678
+1303:1:679
+1304:1:683
+1305:1:684
+1306:1:678
+1307:1:679
+1308:1:683
+1309:1:684
+1310:1:692
+1311:1:697
+1312:1:698
+1313:1:709
+1314:1:710
+1315:1:711
+1316:1:722
+1317:1:727
+1318:1:728
+1319:1:739
+1320:1:740
+1321:1:741
+1322:1:739
+1323:1:747
+1324:1:748
+1325:1:752
+1326:0:4630
+1327:1:11
+1328:0:4630
+1329:2:3757
+1330:2:3758
+1331:0:4630
+1332:2:3295
+1333:0:4630
+1334:2:3620
+1335:2:3621
+1336:2:3625
+1337:2:3626
+1338:2:3634
+1339:2:3635
+1340:2:3639
+1341:2:3640
+1342:2:3648
+1343:2:3653
+1344:2:3657
+1345:2:3658
+1346:2:3666
+1347:2:3667
+1348:2:3671
+1349:2:3672
+1350:2:3666
+1351:2:3667
+1352:2:3671
+1353:2:3672
+1354:2:3680
+1355:2:3685
+1356:2:3686
+1357:2:3697
+1358:2:3698
+1359:2:3699
+1360:2:3710
+1361:2:3715
+1362:2:3716
+1363:2:3727
+1364:2:3728
+1365:2:3729
+1366:2:3727
+1367:2:3728
+1368:2:3729
+1369:2:3740
+1370:2:3749
+1371:0:4630
+1372:2:3295
+1373:0:4630
+1374:2:3755
+1375:0:4630
+1376:1:761
+1377:1:764
+1378:1:765
+1379:0:4630
+1380:1:11
+1381:0:4630
+1382:2:4278
+1383:2:4279
+1384:2:4283
+1385:2:4287
+1386:2:4288
+1387:2:4292
+1388:2:4300
+1389:2:4301
+1390:2:4305
+1391:2:4309
+1392:2:4310
+1393:2:4305
+1394:2:4309
+1395:2:4310
+1396:2:4314
+1397:2:4321
+1398:2:4328
+1399:2:4329
+1400:2:4336
+1401:2:4341
+1402:2:4348
+1403:2:4349
+1404:2:4348
+1405:2:4349
+1406:2:4356
+1407:2:4360
+1408:0:4630
+1409:2:3757
+1410:2:3758
+1411:0:4630
+1412:2:3295
+1413:0:4630
+1414:2:3620
+1415:2:3621
+1416:2:3625
+1417:2:3626
+1418:2:3634
+1419:2:3635
+1420:2:3639
+1421:2:3640
+1422:2:3648
+1423:2:3653
+1424:2:3657
+1425:2:3658
+1426:2:3666
+1427:2:3667
+1428:2:3671
+1429:2:3672
+1430:2:3666
+1431:2:3667
+1432:2:3671
+1433:2:3672
+1434:2:3680
+1435:2:3685
+1436:2:3686
+1437:2:3697
+1438:2:3698
+1439:2:3699
+1440:2:3710
+1441:2:3715
+1442:2:3716
+1443:2:3727
+1444:2:3728
+1445:2:3729
+1446:2:3727
+1447:2:3728
+1448:2:3729
+1449:2:3740
+1450:2:3749
+1451:0:4630
+1452:2:3295
+1453:0:4630
+1454:2:3759
+1455:2:3760
+1456:2:3764
+1457:2:3765
+1458:2:3773
+1459:2:3774
+1460:2:3778
+1461:2:3779
+1462:2:3787
+1463:2:3792
+1464:2:3796
+1465:2:3797
+1466:2:3805
+1467:2:3806
+1468:2:3810
+1469:2:3811
+1470:2:3805
+1471:2:3806
+1472:2:3810
+1473:2:3811
+1474:2:3819
+1475:2:3824
+1476:2:3825
+1477:2:3836
+1478:2:3837
+1479:2:3838
+1480:2:3849
+1481:2:3854
+1482:2:3855
+1483:2:3866
+1484:2:3867
+1485:2:3868
+1486:2:3866
+1487:2:3867
+1488:2:3868
+1489:2:3879
+1490:0:4630
+1491:2:3295
+1492:0:4630
+1493:2:3755
+1494:0:4630
+1495:2:4278
+1496:2:4279
+1497:2:4283
+1498:2:4287
+1499:2:4288
+1500:2:4292
+1501:2:4300
+1502:2:4301
+1503:2:4305
+1504:2:4309
+1505:2:4310
+1506:2:4305
+1507:2:4309
+1508:2:4310
+1509:2:4314
+1510:2:4321
+1511:2:4328
+1512:2:4329
+1513:2:4336
+1514:2:4341
+1515:2:4348
+1516:2:4349
+1517:2:4348
+1518:2:4349
+1519:2:4356
+1520:2:4360
+1521:0:4630
+1522:1:768
+1523:1:769
+1524:1:773
+1525:1:774
+1526:1:782
+1527:1:783
+1528:1:787
+1529:1:788
+1530:1:796
+1531:1:801
+1532:1:805
+1533:1:806
+1534:1:814
+1535:1:815
+1536:1:819
+1537:1:820
+1538:1:814
+1539:1:815
+1540:1:819
+1541:1:820
+1542:1:828
+1543:1:833
+1544:1:834
+1545:1:845
+1546:1:846
+1547:1:847
+1548:1:858
+1549:1:863
+1550:1:864
+1551:1:875
+1552:1:876
+1553:1:877
+1554:1:875
+1555:1:883
+1556:1:884
+1557:1:888
+1558:0:4630
+1559:1:11
+1560:0:4630
+1561:2:3757
+1562:2:3758
+1563:0:4630
+1564:2:3295
+1565:0:4630
+1566:2:3620
+1567:2:3621
+1568:2:3625
+1569:2:3626
+1570:2:3634
+1571:2:3635
+1572:2:3639
+1573:2:3640
+1574:2:3648
+1575:2:3653
+1576:2:3657
+1577:2:3658
+1578:2:3666
+1579:2:3667
+1580:2:3671
+1581:2:3672
+1582:2:3666
+1583:2:3667
+1584:2:3671
+1585:2:3672
+1586:2:3680
+1587:2:3685
+1588:2:3686
+1589:2:3697
+1590:2:3698
+1591:2:3699
+1592:2:3710
+1593:2:3715
+1594:2:3716
+1595:2:3727
+1596:2:3728
+1597:2:3729
+1598:2:3727
+1599:2:3728
+1600:2:3729
+1601:2:3740
+1602:2:3749
+1603:0:4630
+1604:2:3295
+1605:0:4630
+1606:2:3755
+1607:0:4630
+1608:2:4278
+1609:2:4279
+1610:2:4283
+1611:2:4287
+1612:2:4288
+1613:2:4292
+1614:2:4300
+1615:2:4301
+1616:2:4305
+1617:2:4309
+1618:2:4310
+1619:2:4305
+1620:2:4309
+1621:2:4310
+1622:2:4314
+1623:2:4321
+1624:2:4328
+1625:2:4329
+1626:2:4336
+1627:2:4341
+1628:2:4348
+1629:2:4349
+1630:2:4348
+1631:2:4349
+1632:2:4356
+1633:2:4360
+1634:0:4630
+1635:1:1028
+1636:1:1029
+1637:1:1033
+1638:1:1034
+1639:1:1042
+1640:1:1043
+1641:1:1047
+1642:1:1048
+1643:1:1056
+1644:1:1061
+1645:1:1065
+1646:1:1066
+1647:1:1074
+1648:1:1075
+1649:1:1079
+1650:1:1080
+1651:1:1074
+1652:1:1075
+1653:1:1079
+1654:1:1080
+1655:1:1088
+1656:1:1093
+1657:1:1094
+1658:1:1105
+1659:1:1106
+1660:1:1107
+1661:1:1118
+1662:1:1123
+1663:1:1124
+1664:1:1135
+1665:1:1136
+1666:1:1137
+1667:1:1135
+1668:1:1143
+1669:1:1144
+1670:1:1148
+1671:1:1155
+1672:1:1159
+1673:0:4630
+1674:1:11
+1675:0:4630
+1676:2:3757
+1677:2:3758
+1678:0:4630
+1679:2:3295
+1680:0:4630
+1681:2:3620
+1682:2:3621
+1683:2:3625
+1684:2:3626
+1685:2:3634
+1686:2:3635
+1687:2:3639
+1688:2:3640
+1689:2:3648
+1690:2:3653
+1691:2:3657
+1692:2:3658
+1693:2:3666
+1694:2:3667
+1695:2:3671
+1696:2:3672
+1697:2:3666
+1698:2:3667
+1699:2:3671
+1700:2:3672
+1701:2:3680
+1702:2:3685
+1703:2:3686
+1704:2:3697
+1705:2:3698
+1706:2:3699
+1707:2:3710
+1708:2:3715
+1709:2:3716
+1710:2:3727
+1711:2:3728
+1712:2:3729
+1713:2:3727
+1714:2:3728
+1715:2:3729
+1716:2:3740
+1717:2:3749
+1718:0:4630
+1719:2:3295
+1720:0:4630
+1721:2:3755
+1722:0:4630
+1723:1:1160
+1724:1:1161
+1725:1:1165
+1726:1:1166
+1727:1:1174
+1728:1:1175
+1729:1:1176
+1730:1:1188
+1731:1:1193
+1732:1:1197
+1733:1:1198
+1734:1:1206
+1735:1:1207
+1736:1:1211
+1737:1:1212
+1738:1:1206
+1739:1:1207
+1740:1:1211
+1741:1:1212
+1742:1:1220
+1743:1:1225
+1744:1:1226
+1745:1:1237
+1746:1:1238
+1747:1:1239
+1748:1:1250
+1749:1:1255
+1750:1:1256
+1751:1:1267
+1752:1:1268
+1753:1:1269
+1754:1:1267
+1755:1:1275
+1756:1:1276
+1757:1:1280
+1758:0:4630
+1759:1:11
+1760:0:4630
+1761:2:4278
+1762:2:4279
+1763:2:4283
+1764:2:4287
+1765:2:4288
+1766:2:4292
+1767:2:4300
+1768:2:4301
+1769:2:4305
+1770:2:4309
+1771:2:4310
+1772:2:4305
+1773:2:4309
+1774:2:4310
+1775:2:4314
+1776:2:4321
+1777:2:4328
+1778:2:4329
+1779:2:4336
+1780:2:4341
+1781:2:4348
+1782:2:4349
+1783:2:4348
+1784:2:4349
+1785:2:4356
+1786:2:4360
+1787:0:4630
+1788:2:3757
+1789:2:3758
+1790:0:4630
+1791:2:3295
+1792:0:4630
+1793:2:3620
+1794:2:3621
+1795:2:3625
+1796:2:3626
+1797:2:3634
+1798:2:3635
+1799:2:3639
+1800:2:3640
+1801:2:3648
+1802:2:3653
+1803:2:3657
+1804:2:3658
+1805:2:3666
+1806:2:3667
+1807:2:3671
+1808:2:3672
+1809:2:3666
+1810:2:3667
+1811:2:3671
+1812:2:3672
+1813:2:3680
+1814:2:3685
+1815:2:3686
+1816:2:3697
+1817:2:3698
+1818:2:3699
+1819:2:3710
+1820:2:3715
+1821:2:3716
+1822:2:3727
+1823:2:3728
+1824:2:3729
+1825:2:3727
+1826:2:3728
+1827:2:3729
+1828:2:3740
+1829:2:3749
+1830:0:4630
+1831:2:3295
+1832:0:4630
+1833:2:3755
+1834:0:4630
+1835:1:1289
+1836:0:4630
+1837:2:4278
+1838:2:4279
+1839:2:4283
+1840:2:4287
+1841:2:4288
+1842:2:4292
+1843:2:4300
+1844:2:4301
+1845:2:4305
+1846:2:4309
+1847:2:4310
+1848:2:4305
+1849:2:4309
+1850:2:4310
+1851:2:4314
+1852:2:4321
+1853:2:4328
+1854:2:4329
+1855:2:4336
+1856:2:4341
+1857:2:4348
+1858:2:4349
+1859:2:4348
+1860:2:4349
+1861:2:4356
+1862:2:4360
+1863:0:4630
+1864:2:3757
+1865:2:3758
+1866:0:4630
+1867:2:3295
+1868:0:4630
+1869:2:3620
+1870:2:3621
+1871:2:3625
+1872:2:3626
+1873:2:3634
+1874:2:3635
+1875:2:3639
+1876:2:3640
+1877:2:3648
+1878:2:3653
+1879:2:3657
+1880:2:3658
+1881:2:3666
+1882:2:3667
+1883:2:3671
+1884:2:3672
+1885:2:3666
+1886:2:3667
+1887:2:3671
+1888:2:3672
+1889:2:3680
+1890:2:3685
+1891:2:3686
+1892:2:3697
+1893:2:3698
+1894:2:3699
+1895:2:3710
+1896:2:3715
+1897:2:3716
+1898:2:3727
+1899:2:3728
+1900:2:3729
+1901:2:3727
+1902:2:3728
+1903:2:3729
+1904:2:3740
+1905:2:3749
+1906:0:4630
+1907:2:3295
+1908:0:4630
+1909:2:3759
+1910:2:3760
+1911:2:3764
+1912:2:3765
+1913:2:3773
+1914:2:3774
+1915:2:3778
+1916:2:3779
+1917:2:3787
+1918:2:3792
+1919:2:3796
+1920:2:3797
+1921:2:3805
+1922:2:3806
+1923:2:3810
+1924:2:3811
+1925:2:3805
+1926:2:3806
+1927:2:3810
+1928:2:3811
+1929:2:3819
+1930:2:3824
+1931:2:3825
+1932:2:3836
+1933:2:3837
+1934:2:3838
+1935:2:3849
+1936:2:3854
+1937:2:3855
+1938:2:3866
+1939:2:3867
+1940:2:3868
+1941:2:3866
+1942:2:3867
+1943:2:3868
+1944:2:3879
+1945:0:4630
+1946:2:3295
+1947:0:4630
+1948:2:3755
+1949:0:4630
+1950:2:4278
+1951:2:4279
+1952:2:4283
+1953:2:4287
+1954:2:4288
+1955:2:4292
+1956:2:4300
+1957:2:4301
+1958:2:4305
+1959:2:4309
+1960:2:4310
+1961:2:4305
+1962:2:4309
+1963:2:4310
+1964:2:4314
+1965:2:4321
+1966:2:4328
+1967:2:4329
+1968:2:4336
+1969:2:4341
+1970:2:4348
+1971:2:4349
+1972:2:4348
+1973:2:4349
+1974:2:4356
+1975:2:4360
+1976:0:4630
+1977:1:3023
+1978:1:3027
+1979:1:3028
+1980:1:3036
+1981:1:3037
+1982:1:3041
+1983:1:3042
+1984:1:3050
+1985:1:3055
+1986:1:3059
+1987:1:3060
+1988:1:3068
+1989:1:3069
+1990:1:3073
+1991:1:3074
+1992:1:3068
+1993:1:3069
+1994:1:3073
+1995:1:3074
+1996:1:3082
+1997:1:3087
+1998:1:3088
+1999:1:3099
+2000:1:3100
+2001:1:3101
+2002:1:3112
+2003:1:3117
+2004:1:3118
+2005:1:3129
+2006:1:3130
+2007:1:3131
+2008:1:3129
+2009:1:3137
+2010:1:3138
+2011:1:3142
+2012:1:3146
+2013:0:4630
+2014:2:3757
+2015:2:3758
+2016:0:4630
+2017:2:3295
+2018:0:4630
+2019:2:3620
+2020:2:3621
+2021:2:3625
+2022:2:3626
+2023:2:3634
+2024:2:3635
+2025:2:3639
+2026:2:3640
+2027:2:3648
+2028:2:3653
+2029:2:3657
+2030:2:3658
+2031:2:3666
+2032:2:3667
+2033:2:3671
+2034:2:3672
+2035:2:3666
+2036:2:3667
+2037:2:3671
+2038:2:3672
+2039:2:3680
+2040:2:3685
+2041:2:3686
+2042:2:3697
+2043:2:3698
+2044:2:3699
+2045:2:3710
+2046:2:3715
+2047:2:3716
+2048:2:3727
+2049:2:3728
+2050:2:3729
+2051:2:3727
+2052:2:3728
+2053:2:3729
+2054:2:3740
+2055:2:3749
+2056:0:4630
+2057:2:3295
+2058:0:4630
+2059:2:3755
+2060:0:4630
+2061:2:4278
+2062:2:4279
+2063:2:4283
+2064:2:4287
+2065:2:4288
+2066:2:4292
+2067:2:4300
+2068:2:4301
+2069:2:4305
+2070:2:4309
+2071:2:4310
+2072:2:4305
+2073:2:4309
+2074:2:4310
+2075:2:4314
+2076:2:4321
+2077:2:4328
+2078:2:4329
+2079:2:4336
+2080:2:4341
+2081:2:4348
+2082:2:4349
+2083:2:4348
+2084:2:4349
+2085:2:4356
+2086:2:4360
+2087:0:4630
+2088:1:1291
+2089:1:1292
+2090:0:4628
+2091:1:11
+2092:0:4634
+2093:1:3146
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.log
new file mode 100644 (file)
index 0000000..5c504a7
--- /dev/null
@@ -0,0 +1,505 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_wmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    5131 States=    1e+06 Transitions= 5.44e+08 Memory=   550.432        t=    664 R=   2e+03
+Depth=    5131 States=    2e+06 Transitions= 1.26e+09 Memory=   634.318        t= 1.56e+03 R=   1e+03
+Depth=    5422 States=    3e+06 Transitions= 1.93e+09 Memory=   718.303        t= 2.43e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=    5422 States=    4e+06 Transitions= 2.58e+09 Memory=   833.311        t= 3.23e+03 R=   1e+03
+Depth=    5422 States=    5e+06 Transitions= 3.25e+09 Memory=   917.295        t= 4.07e+03 R=   1e+03
+pan: claim violated! (at depth 1420)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5422, errors: 1
+  5523708 states, stored
+3.5450503e+09 states, matched
+3.550574e+09 transitions (= stored+matched)
+2.0146324e+10 atomic steps
+hash conflicts: 2.4095106e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+  611.067      equivalent memory usage for states (stored*(State-vector + overhead))
+  471.813      actual memory usage for states (compression: 77.21%)
+               state-vector as stored = 62 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  961.240      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 82, "(1)"
+       line 438, "pan.___", state 112, "(1)"
+       line 442, "pan.___", state 125, "(1)"
+       line 597, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 218, "(1)"
+       line 438, "pan.___", state 248, "(1)"
+       line 442, "pan.___", state 261, "(1)"
+       line 411, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 347, "(1)"
+       line 438, "pan.___", state 377, "(1)"
+       line 442, "pan.___", state 390, "(1)"
+       line 411, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 415, "(1)"
+       line 411, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 416, "else"
+       line 411, "pan.___", state 419, "(1)"
+       line 415, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 429, "(1)"
+       line 415, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 430, "else"
+       line 415, "pan.___", state 433, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 413, "pan.___", state 439, "((i<1))"
+       line 413, "pan.___", state 439, "((i>=1))"
+       line 420, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 447, "(1)"
+       line 420, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 448, "else"
+       line 420, "pan.___", state 451, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 424, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 461, "(1)"
+       line 424, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 462, "else"
+       line 424, "pan.___", state 465, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 422, "pan.___", state 471, "((i<2))"
+       line 422, "pan.___", state 471, "((i>=2))"
+       line 429, "pan.___", state 478, "(1)"
+       line 429, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 479, "else"
+       line 429, "pan.___", state 482, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 433, "pan.___", state 491, "(1)"
+       line 433, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 492, "else"
+       line 433, "pan.___", state 495, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 431, "pan.___", state 501, "((i<1))"
+       line 431, "pan.___", state 501, "((i>=1))"
+       line 438, "pan.___", state 508, "(1)"
+       line 438, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 509, "else"
+       line 438, "pan.___", state 512, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 442, "pan.___", state 521, "(1)"
+       line 442, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 522, "else"
+       line 442, "pan.___", state 525, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 440, "pan.___", state 531, "((i<2))"
+       line 440, "pan.___", state 531, "((i>=2))"
+       line 450, "pan.___", state 535, "(1)"
+       line 450, "pan.___", state 535, "(1)"
+       line 597, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 540, "(1)"
+       line 272, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 555, "(1)"
+       line 280, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 591, "(1)"
+       line 253, "pan.___", state 599, "(1)"
+       line 257, "pan.___", state 611, "(1)"
+       line 261, "pan.___", state 619, "(1)"
+       line 411, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 702, "(1)"
+       line 433, "pan.___", state 715, "(1)"
+       line 438, "pan.___", state 732, "(1)"
+       line 442, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 838, "(1)"
+       line 438, "pan.___", state 868, "(1)"
+       line 442, "pan.___", state 881, "(1)"
+       line 411, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 904, "(1)"
+       line 411, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 905, "else"
+       line 411, "pan.___", state 908, "(1)"
+       line 415, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 918, "(1)"
+       line 415, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 919, "else"
+       line 415, "pan.___", state 922, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 413, "pan.___", state 928, "((i<1))"
+       line 413, "pan.___", state 928, "((i>=1))"
+       line 420, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 936, "(1)"
+       line 420, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 937, "else"
+       line 420, "pan.___", state 940, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 424, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 950, "(1)"
+       line 424, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 951, "else"
+       line 424, "pan.___", state 954, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 422, "pan.___", state 960, "((i<2))"
+       line 422, "pan.___", state 960, "((i>=2))"
+       line 429, "pan.___", state 967, "(1)"
+       line 429, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 968, "else"
+       line 429, "pan.___", state 971, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 433, "pan.___", state 980, "(1)"
+       line 433, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 981, "else"
+       line 433, "pan.___", state 984, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 431, "pan.___", state 990, "((i<1))"
+       line 431, "pan.___", state 990, "((i>=1))"
+       line 438, "pan.___", state 997, "(1)"
+       line 438, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 998, "else"
+       line 438, "pan.___", state 1001, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 442, "pan.___", state 1010, "(1)"
+       line 442, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 1011, "else"
+       line 442, "pan.___", state 1014, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 440, "pan.___", state 1020, "((i<2))"
+       line 440, "pan.___", state 1020, "((i>=2))"
+       line 450, "pan.___", state 1024, "(1)"
+       line 450, "pan.___", state 1024, "(1)"
+       line 605, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1098, "(1)"
+       line 433, "pan.___", state 1111, "(1)"
+       line 438, "pan.___", state 1128, "(1)"
+       line 442, "pan.___", state 1141, "(1)"
+       line 411, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1230, "(1)"
+       line 438, "pan.___", state 1260, "(1)"
+       line 442, "pan.___", state 1273, "(1)"
+       line 411, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1363, "(1)"
+       line 438, "pan.___", state 1393, "(1)"
+       line 442, "pan.___", state 1406, "(1)"
+       line 411, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1492, "(1)"
+       line 438, "pan.___", state 1522, "(1)"
+       line 442, "pan.___", state 1535, "(1)"
+       line 272, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1605, "(1)"
+       line 253, "pan.___", state 1613, "(1)"
+       line 257, "pan.___", state 1625, "(1)"
+       line 261, "pan.___", state 1633, "(1)"
+       line 411, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1716, "(1)"
+       line 433, "pan.___", state 1729, "(1)"
+       line 438, "pan.___", state 1746, "(1)"
+       line 442, "pan.___", state 1759, "(1)"
+       line 411, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1845, "(1)"
+       line 433, "pan.___", state 1858, "(1)"
+       line 438, "pan.___", state 1875, "(1)"
+       line 442, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1977, "(1)"
+       line 438, "pan.___", state 2007, "(1)"
+       line 442, "pan.___", state 2020, "(1)"
+       line 644, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2113, "(1)"
+       line 438, "pan.___", state 2143, "(1)"
+       line 442, "pan.___", state 2156, "(1)"
+       line 411, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2242, "(1)"
+       line 438, "pan.___", state 2272, "(1)"
+       line 442, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2310, "(1)"
+       line 411, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2311, "else"
+       line 411, "pan.___", state 2314, "(1)"
+       line 415, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2324, "(1)"
+       line 415, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2325, "else"
+       line 415, "pan.___", state 2328, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 413, "pan.___", state 2334, "((i<1))"
+       line 413, "pan.___", state 2334, "((i>=1))"
+       line 420, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2342, "(1)"
+       line 420, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2343, "else"
+       line 420, "pan.___", state 2346, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 424, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2356, "(1)"
+       line 424, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2357, "else"
+       line 424, "pan.___", state 2360, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 422, "pan.___", state 2366, "((i<2))"
+       line 422, "pan.___", state 2366, "((i>=2))"
+       line 429, "pan.___", state 2373, "(1)"
+       line 429, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 2374, "else"
+       line 429, "pan.___", state 2377, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 433, "pan.___", state 2386, "(1)"
+       line 433, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 2387, "else"
+       line 433, "pan.___", state 2390, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 431, "pan.___", state 2396, "((i<1))"
+       line 431, "pan.___", state 2396, "((i>=1))"
+       line 438, "pan.___", state 2403, "(1)"
+       line 438, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 2404, "else"
+       line 438, "pan.___", state 2407, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 442, "pan.___", state 2416, "(1)"
+       line 442, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 2417, "else"
+       line 442, "pan.___", state 2420, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 440, "pan.___", state 2426, "((i<2))"
+       line 440, "pan.___", state 2426, "((i>=2))"
+       line 450, "pan.___", state 2430, "(1)"
+       line 450, "pan.___", state 2430, "(1)"
+       line 644, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2435, "(1)"
+       line 272, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2486, "(1)"
+       line 253, "pan.___", state 2494, "(1)"
+       line 257, "pan.___", state 2506, "(1)"
+       line 261, "pan.___", state 2514, "(1)"
+       line 411, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2597, "(1)"
+       line 433, "pan.___", state 2610, "(1)"
+       line 438, "pan.___", state 2627, "(1)"
+       line 442, "pan.___", state 2640, "(1)"
+       line 272, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2711, "(1)"
+       line 253, "pan.___", state 2719, "(1)"
+       line 257, "pan.___", state 2731, "(1)"
+       line 261, "pan.___", state 2739, "(1)"
+       line 411, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2822, "(1)"
+       line 433, "pan.___", state 2835, "(1)"
+       line 438, "pan.___", state 2852, "(1)"
+       line 442, "pan.___", state 2865, "(1)"
+       line 411, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2951, "(1)"
+       line 433, "pan.___", state 2964, "(1)"
+       line 438, "pan.___", state 2981, "(1)"
+       line 442, "pan.___", state 2994, "(1)"
+       line 249, "pan.___", state 3027, "(1)"
+       line 257, "pan.___", state 3047, "(1)"
+       line 261, "pan.___", state 3055, "(1)"
+       line 249, "pan.___", state 3070, "(1)"
+       line 253, "pan.___", state 3078, "(1)"
+       line 257, "pan.___", state 3090, "(1)"
+       line 261, "pan.___", state 3098, "(1)"
+       line 898, "pan.___", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 19, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 33, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 84, "(1)"
+       line 433, "pan.___", state 97, "(1)"
+       line 272, "pan.___", state 150, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 152, "(1)"
+       line 276, "pan.___", state 159, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 161, "(1)"
+       line 276, "pan.___", state 162, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 162, "else"
+       line 274, "pan.___", state 167, "((i<1))"
+       line 274, "pan.___", state 167, "((i>=1))"
+       line 280, "pan.___", state 172, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 174, "(1)"
+       line 280, "pan.___", state 175, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 175, "else"
+       line 284, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 183, "(1)"
+       line 284, "pan.___", state 184, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 184, "else"
+       line 289, "pan.___", state 193, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 289, "pan.___", state 193, "else"
+       line 411, "pan.___", state 212, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 226, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 244, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 258, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 277, "(1)"
+       line 433, "pan.___", state 290, "(1)"
+       line 438, "pan.___", state 307, "(1)"
+       line 442, "pan.___", state 320, "(1)"
+       line 415, "pan.___", state 357, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 375, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 389, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 421, "(1)"
+       line 438, "pan.___", state 438, "(1)"
+       line 442, "pan.___", state 451, "(1)"
+       line 415, "pan.___", state 496, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 514, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 528, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 560, "(1)"
+       line 438, "pan.___", state 577, "(1)"
+       line 442, "pan.___", state 590, "(1)"
+       line 415, "pan.___", state 625, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 643, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 657, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 689, "(1)"
+       line 438, "pan.___", state 706, "(1)"
+       line 442, "pan.___", state 719, "(1)"
+       line 415, "pan.___", state 756, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 774, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 788, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 820, "(1)"
+       line 438, "pan.___", state 837, "(1)"
+       line 442, "pan.___", state 850, "(1)"
+       line 272, "pan.___", state 905, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 914, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 952, "(1)"
+       line 253, "pan.___", state 960, "(1)"
+       line 257, "pan.___", state 972, "(1)"
+       line 261, "pan.___", state 980, "(1)"
+       line 276, "pan.___", state 1005, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1018, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1027, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1043, "(1)"
+       line 253, "pan.___", state 1051, "(1)"
+       line 257, "pan.___", state 1063, "(1)"
+       line 261, "pan.___", state 1071, "(1)"
+       line 276, "pan.___", state 1096, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1109, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1118, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1134, "(1)"
+       line 253, "pan.___", state 1142, "(1)"
+       line 257, "pan.___", state 1154, "(1)"
+       line 261, "pan.___", state 1162, "(1)"
+       line 276, "pan.___", state 1187, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1200, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1209, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1225, "(1)"
+       line 253, "pan.___", state 1233, "(1)"
+       line 257, "pan.___", state 1245, "(1)"
+       line 261, "pan.___", state 1253, "(1)"
+       line 1237, "pan.___", state 1268, "-end-"
+       (77 of 1268 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 4.43e+03 seconds
+pan: rate 1246.6728 states/second
+pan: avg transition delay 1.2479e-06 usec
+cp .input.spin urcu_free_no_wmb.spin.input
+cp .input.spin.trail urcu_free_no_wmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.spin.input
new file mode 100644 (file)
index 0000000..49791b0
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_WMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding per variable is used
+ * to save state space. The bits act as triggers to execute the instructions
+ * taking those variables as input; leaving bits active inhibits instruction
+ * execution. This scheme makes both instruction disabling and dependency
+ * fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
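+
+/*
+ * Illustrative sketch only (not part of the verified model; compiled out by
+ * the #if 0 guard): an instruction guarded by CONSUME_TOKENS becomes
+ * executable once all of its input tokens are produced and none of its
+ * "notbits" are set, and it publishes its own token with PRODUCE_TOKENS when
+ * done. The token names EX_IN and EX_OUT are hypothetical and exist only for
+ * this example.
+ */
+#if 0
+	:: CONSUME_TOKENS(proc_urcu_reader, EX_IN, EX_OUT) ->
+		/* instruction body goes here */
+		PRODUCE_TOKENS(proc_urcu_reader, EX_OUT);
+#endif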
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can remove this dependency, but it remains required when writing multiple
+ * times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Another classic algorithm to compute dominance: Lengauer-Tarjan (used in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order
+ * volatile accesses so they appear in the right order on a given CPU. They can
+ * still be reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
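+
+/*
+ * Minimal illustration of the dependency types above, using hypothetical
+ * variables a, b and c which are not part of this model:
+ *
+ *   RAW     : a = b; c = a;   (the second statement reads the first's result)
+ *   WAR     : c = a; a = b;   (the write to a must not overtake the prior read)
+ *   WAW     : a = b; a = c;   (the final value of a depends on statement order)
+ *   Control : if :: (a != 0) -> b = 1 :: else -> skip fi;
+ */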
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
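+
+/*
+ * This is why the reader body below instantiates PROCEDURE_READ_LOCK twice
+ * (READ_LOCK_BASE, then READ_LOCK_UNROLL_BASE as the "second consecutive
+ * lock") instead of looping: manual unrolling keeps both lock/unlock pairs
+ * visible to the out-of-order instruction scheduling model.
+ */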
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May write a dirty cache entry back to memory (making it visible to the
+ * other processes), or not, modeling random cache write-back.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
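+
+/*
+ * Illustrative sketch only (compiled out by the #if 0 guard): how a cached
+ * variable is declared and accessed through the macros above. The variable
+ * name example_flag and the scratch index j are hypothetical and exist only
+ * for this example.
+ */
+#if 0
+DECLARE_CACHED_VAR(byte, example_flag);
+
+/* inside a proctype body: */
+INIT_CACHED_VAR(example_flag, 0, j);
+WRITE_CACHED_VAR(example_flag, 1);		/* marks this proc's copy dirty */
+CACHE_WRITE_TO_MEM(example_flag, get_pid());	/* flushed to memory if dirty */
+assert(READ_CACHED_VAR(example_flag) == 1);
+#endif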
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
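+
+/*
+ * Note: REMOTE_BARRIERS is commented out at the top of this file, so the
+ * #else branch below (read-side barriers mapped directly to smp_mb) is the
+ * one actually used for this verification run.
+ */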
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note! Currently only one reader. */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier, because the performance impact of adding a branch to skip it
+                        * in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
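For orientation, the reader proctype below simply calls urcu_one_read() in a loop; the token graph above encodes one (once-nested, then unrolled) read-side critical section. A minimal C sketch of that sequence, assuming illustrative constants and reusing the model's variable names (rcu_ptr is an index into the rcu_data[] slab, urcu_active_readers[] keeps the nesting count in its low bits and the grace-period parity in RCU_GP_CTR_BIT), is given here purely as a reading aid; it is not the liburcu API, and the lock/unlock internals are a simplification:

/* Illustrative values only; the real definitions live in the model's DEFINES file. */
#define NR_READERS              1
#define SLAB_SIZE               2
#define RCU_GP_CTR_BIT          (1 << 7)
#define RCU_GP_CTR_NEST_MASK    (RCU_GP_CTR_BIT - 1)
#define WINE                    0
#define POISON                  1

#define smp_mb()                     __sync_synchronize()
#define smp_wmb()                    __sync_synchronize()
#define smp_read_barrier_depends()   do { } while (0)  /* no-op except on Alpha */

static unsigned char urcu_gp_ctr = 1;           /* parity bit clear + count 1, as in init */
static unsigned char urcu_active_readers[NR_READERS];
static unsigned char rcu_ptr;                   /* index of the live rcu_data[] entry */
static unsigned char rcu_data[SLAB_SIZE] = { WINE, POISON };

static void urcu_one_read_sketch(int self)
{
        unsigned char tmp = urcu_active_readers[self];
        unsigned char idx;

        /* READ_LOCK_OUT: outermost lock snapshots the global parity, nested lock adds 1. */
        urcu_active_readers[self] =
                (tmp & RCU_GP_CTR_NEST_MASK) ? tmp + 1 : urcu_gp_ctr;
        smp_mb();                               /* READ_PROC_FIRST_MB */
        urcu_active_readers[self] += 1;         /* READ_LOCK_NESTED_OUT */

        idx = rcu_ptr;                          /* READ_PROC_READ_GEN */
        smp_read_barrier_depends();             /* rmb1 */
        (void)rcu_data[idx];                    /* READ_PROC_ACCESS_GEN */

        urcu_active_readers[self] -= 1;         /* READ_UNLOCK_NESTED_OUT, no barrier */
        smp_mb();                               /* READ_PROC_SECOND_MB */
        urcu_active_readers[self] -= 1;         /* READ_UNLOCK_OUT */

        /* The model then unrolls a second, back-to-back critical section
         * (READ_LOCK_OUT_UNROLL .. READ_UNLOCK_OUT_UNROLL) of the same shape,
         * to exercise the RAW dependency between consecutive loops. */
}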
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
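The writer below reuses the dependency-token technique used for the reader above: each modelled instruction owns one or more bits of _proc_urcu_writer, CONSUME_TOKENS() is the enabling guard (all dependency bits set and the instruction's own bits still clear), PRODUCE_TOKENS() marks the instruction as executed, and CLEAR_TOKENS() resets bits so the wait loops can iterate. The exact macro bodies are defined earlier in urcu.spin; the self-contained C fragment below is only a minimal sketch of that guard/produce idea, with made-up token names:

#include <stdio.h>

#define TOK_A (1 << 0)          /* think WRITE_DATA     */
#define TOK_B (1 << 1)          /* think WRITE_PROC_WMB */
#define TOK_C (1 << 2)          /* think WRITE_XCHG_PTR */
#define ALL_TOKENS (TOK_A | TOK_B | TOK_C)

static unsigned long tokens;    /* bit set => that "instruction" has executed */

/* Enabled when every dependency has executed and the instruction itself has not. */
static int consume_tokens(unsigned long deps, unsigned long self)
{
        return (tokens & deps) == deps && !(tokens & self);
}

static void produce_tokens(unsigned long self, const char *name)
{
        tokens |= self;
        printf("executed %s\n", name);
}

int main(void)
{
        /* The model non-deterministically picks any enabled instruction each step,
         * exploring every order allowed by the dependency edges; this sketch just
         * walks one legal order. */
        while (tokens != ALL_TOKENS) {
                if (consume_tokens(0, TOK_A))
                        produce_tokens(TOK_A, "A (no dependencies)");
                else if (consume_tokens(TOK_A, TOK_B))
                        produce_tokens(TOK_B, "B (depends on A)");
                else if (consume_tokens(TOK_B, TOK_C))
                        produce_tokens(TOK_C, "C (depends on B)");
        }
        return 0;
}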
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
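Read top to bottom, the write-side tokens model one data update, a pointer exchange, a two-flip grace period, and a deferred free (poisoning). Assuming the same illustrative declarations as the reader sketch above, the intended C-level sequence is roughly as follows; this is a sketch of the modelled control flow (only reader 0 is polled, as in the model), not the liburcu implementation:

static void urcu_writer_one_pass_sketch(void)
{
        static unsigned char cur_data;
        unsigned char old_data, readers;

        cur_data = (cur_data + 1) % SLAB_SIZE;
        rcu_data[cur_data] = WINE;                      /* WRITE_DATA            */
        smp_wmb();                                      /* WRITE_PROC_WMB        */
        old_data = rcu_ptr;                             /* WRITE_XCHG_PTR        */
        rcu_ptr = cur_data;                             /*   (atomic exchange)   */
        smp_mb();                                       /* WRITE_PROC_FIRST_MB   */

        /* First parity flip, then wait for reader 0 to be either outside a
         * read-side critical section or already in the new parity. */
        urcu_gp_ctr ^= RCU_GP_CTR_BIT;                  /* WRITE_PROC_FIRST_WRITE_GP */
        do {                                            /* WRITE_PROC_FIRST_WAIT(_LOOP) */
                readers = urcu_active_readers[0];
        } while ((readers & RCU_GP_CTR_NEST_MASK) &&
                 ((readers ^ urcu_gp_ctr) & RCU_GP_CTR_BIT));

        /* Second flip and second wait; when SINGLE_FLIP is defined, the model
         * pre-produces these tokens, i.e. skips them, to exhibit the error. */
        urcu_gp_ctr ^= RCU_GP_CTR_BIT;                  /* WRITE_PROC_SECOND_WRITE_GP */
        do {                                            /* WRITE_PROC_SECOND_WAIT(_LOOP) */
                readers = urcu_active_readers[0];
        } while ((readers & RCU_GP_CTR_NEST_MASK) &&
                 ((readers ^ urcu_gp_ctr) & RCU_GP_CTR_BIT));

        smp_mb();                                       /* WRITE_PROC_SECOND_MB  */
        rcu_data[old_data] = POISON;                    /* WRITE_FREE            */
}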
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep local track of the current parity so we
+                                * do not add non-existing dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, which was done as a prefetch. Note that
+                        * all instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave this init process after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.spin.input.trail b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_no_wmb.spin.input.trail
new file mode 100644 (file)
index 0000000..b250110
--- /dev/null
@@ -0,0 +1,1423 @@
+-2:3:-2
+-4:-4:-4
+1:0:4463
+2:3:4383
+3:3:4386
+4:3:4386
+5:3:4389
+6:3:4397
+7:3:4397
+8:3:4400
+9:3:4406
+10:3:4410
+11:3:4410
+12:3:4413
+13:3:4423
+14:3:4431
+15:3:4431
+16:3:4434
+17:3:4440
+18:3:4444
+19:3:4444
+20:3:4447
+21:3:4453
+22:3:4457
+23:3:4458
+24:0:4463
+25:3:4460
+26:0:4463
+27:2:3117
+28:0:4463
+29:2:3123
+30:0:4463
+31:2:3124
+32:0:4463
+33:2:3126
+34:0:4463
+35:2:3127
+36:0:4463
+37:2:3128
+38:0:4463
+39:2:3129
+40:2:3130
+41:2:3134
+42:2:3135
+43:2:3143
+44:2:3144
+45:2:3148
+46:2:3149
+47:2:3157
+48:2:3162
+49:2:3166
+50:2:3167
+51:2:3175
+52:2:3176
+53:2:3180
+54:2:3181
+55:2:3175
+56:2:3176
+57:2:3180
+58:2:3181
+59:2:3189
+60:2:3194
+61:2:3195
+62:2:3206
+63:2:3207
+64:2:3208
+65:2:3219
+66:2:3224
+67:2:3225
+68:2:3236
+69:2:3237
+70:2:3238
+71:2:3236
+72:2:3237
+73:2:3238
+74:2:3249
+75:2:3257
+76:0:4463
+77:2:3128
+78:0:4463
+79:2:3309
+80:2:3310
+81:2:3311
+82:0:4463
+83:2:3128
+84:0:4463
+85:2:3316
+86:0:4463
+87:2:4020
+88:2:4021
+89:2:4025
+90:2:4029
+91:2:4030
+92:2:4034
+93:2:4039
+94:2:4047
+95:2:4051
+96:2:4052
+97:2:4047
+98:2:4048
+99:2:4056
+100:2:4063
+101:2:4070
+102:2:4071
+103:2:4078
+104:2:4083
+105:2:4090
+106:2:4091
+107:2:4090
+108:2:4091
+109:2:4098
+110:2:4102
+111:0:4463
+112:2:3318
+113:2:4001
+114:0:4463
+115:2:3128
+116:0:4463
+117:2:3319
+118:0:4463
+119:2:3128
+120:0:4463
+121:2:3322
+122:2:3323
+123:2:3327
+124:2:3328
+125:2:3336
+126:2:3337
+127:2:3341
+128:2:3342
+129:2:3350
+130:2:3355
+131:2:3359
+132:2:3360
+133:2:3368
+134:2:3369
+135:2:3373
+136:2:3374
+137:2:3368
+138:2:3369
+139:2:3373
+140:2:3374
+141:2:3382
+142:2:3387
+143:2:3388
+144:2:3399
+145:2:3400
+146:2:3401
+147:2:3412
+148:2:3417
+149:2:3418
+150:2:3429
+151:2:3430
+152:2:3431
+153:2:3429
+154:2:3430
+155:2:3431
+156:2:3442
+157:2:3449
+158:0:4463
+159:2:3128
+160:0:4463
+161:2:3453
+162:2:3454
+163:2:3455
+164:2:3467
+165:2:3468
+166:2:3472
+167:2:3473
+168:2:3481
+169:2:3486
+170:2:3490
+171:2:3491
+172:2:3499
+173:2:3500
+174:2:3504
+175:2:3505
+176:2:3499
+177:2:3500
+178:2:3504
+179:2:3505
+180:2:3513
+181:2:3518
+182:2:3519
+183:2:3530
+184:2:3531
+185:2:3532
+186:2:3543
+187:2:3548
+188:2:3549
+189:2:3560
+190:2:3561
+191:2:3562
+192:2:3560
+193:2:3561
+194:2:3562
+195:2:3573
+196:2:3584
+197:2:3585
+198:0:4463
+199:2:3128
+200:0:4463
+201:2:3592
+202:2:3593
+203:2:3597
+204:2:3598
+205:2:3606
+206:2:3607
+207:2:3611
+208:2:3612
+209:2:3620
+210:2:3625
+211:2:3629
+212:2:3630
+213:2:3638
+214:2:3639
+215:2:3643
+216:2:3644
+217:2:3638
+218:2:3639
+219:2:3643
+220:2:3644
+221:2:3652
+222:2:3657
+223:2:3658
+224:2:3669
+225:2:3670
+226:2:3671
+227:2:3682
+228:2:3687
+229:2:3688
+230:2:3699
+231:2:3700
+232:2:3701
+233:2:3699
+234:2:3700
+235:2:3701
+236:2:3712
+237:0:4463
+238:2:3128
+239:0:4463
+240:2:3721
+241:2:3722
+242:2:3726
+243:2:3727
+244:2:3735
+245:2:3736
+246:2:3740
+247:2:3741
+248:2:3749
+249:2:3754
+250:2:3758
+251:2:3759
+252:2:3767
+253:2:3768
+254:2:3772
+255:2:3773
+256:2:3767
+257:2:3768
+258:2:3772
+259:2:3773
+260:2:3781
+261:2:3786
+262:2:3787
+263:2:3798
+264:2:3799
+265:2:3800
+266:2:3811
+267:2:3816
+268:2:3817
+269:2:3828
+270:2:3829
+271:2:3830
+272:2:3828
+273:2:3829
+274:2:3830
+275:2:3841
+276:2:3848
+277:0:4463
+278:2:3128
+279:0:4463
+280:2:3852
+281:2:3853
+282:2:3854
+283:2:3866
+284:2:3867
+285:2:3871
+286:2:3872
+287:2:3880
+288:2:3885
+289:2:3889
+290:2:3890
+291:2:3898
+292:2:3899
+293:2:3903
+294:2:3904
+295:2:3898
+296:2:3899
+297:2:3903
+298:2:3904
+299:2:3912
+300:2:3917
+301:2:3918
+302:2:3929
+303:2:3930
+304:2:3931
+305:2:3942
+306:2:3947
+307:2:3948
+308:2:3959
+309:2:3960
+310:2:3961
+311:2:3959
+312:2:3960
+313:2:3961
+314:2:3972
+315:2:3982
+316:2:3983
+317:0:4463
+318:2:3128
+319:0:4463
+320:2:3989
+321:0:4463
+322:2:4293
+323:2:4294
+324:2:4298
+325:2:4302
+326:2:4303
+327:2:4307
+328:2:4315
+329:2:4316
+330:2:4320
+331:2:4324
+332:2:4325
+333:2:4320
+334:2:4324
+335:2:4325
+336:2:4329
+337:2:4336
+338:2:4343
+339:2:4344
+340:2:4351
+341:2:4356
+342:2:4363
+343:2:4364
+344:2:4363
+345:2:4364
+346:2:4371
+347:2:4375
+348:0:4463
+349:2:3991
+350:2:4001
+351:0:4463
+352:2:3128
+353:0:4463
+354:2:3992
+355:2:3993
+356:0:4463
+357:2:3128
+358:0:4463
+359:2:3997
+360:0:4463
+361:2:4005
+362:0:4463
+363:2:3124
+364:0:4463
+365:2:3126
+366:0:4463
+367:2:3127
+368:0:4463
+369:2:3128
+370:0:4463
+371:2:3309
+372:2:3310
+373:2:3311
+374:0:4463
+375:2:3128
+376:0:4463
+377:2:3129
+378:2:3130
+379:2:3134
+380:2:3135
+381:2:3143
+382:2:3144
+383:2:3148
+384:2:3149
+385:2:3157
+386:2:3162
+387:2:3163
+388:2:3175
+389:2:3176
+390:2:3177
+391:2:3175
+392:2:3176
+393:2:3180
+394:2:3181
+395:2:3189
+396:2:3194
+397:2:3195
+398:2:3206
+399:2:3207
+400:2:3208
+401:2:3219
+402:2:3224
+403:2:3225
+404:2:3236
+405:2:3237
+406:2:3238
+407:2:3236
+408:2:3237
+409:2:3238
+410:2:3249
+411:2:3257
+412:0:4463
+413:2:3128
+414:0:4463
+415:2:3316
+416:0:4463
+417:2:4020
+418:2:4021
+419:2:4025
+420:2:4029
+421:2:4030
+422:2:4034
+423:2:4042
+424:2:4043
+425:2:4047
+426:2:4048
+427:2:4047
+428:2:4051
+429:2:4052
+430:2:4056
+431:2:4063
+432:2:4070
+433:2:4071
+434:2:4078
+435:2:4083
+436:2:4090
+437:2:4091
+438:2:4090
+439:2:4091
+440:2:4098
+441:2:4102
+442:0:4463
+443:2:3318
+444:2:4001
+445:0:4463
+446:2:3128
+447:0:4463
+448:2:3319
+449:0:4463
+450:2:3128
+451:0:4463
+452:2:3322
+453:2:3323
+454:2:3327
+455:2:3328
+456:2:3336
+457:2:3337
+458:2:3341
+459:2:3342
+460:2:3350
+461:2:3355
+462:2:3359
+463:2:3360
+464:2:3368
+465:2:3369
+466:2:3373
+467:2:3374
+468:2:3368
+469:2:3369
+470:2:3373
+471:2:3374
+472:2:3382
+473:2:3387
+474:2:3388
+475:2:3399
+476:2:3400
+477:2:3401
+478:2:3412
+479:2:3417
+480:2:3418
+481:2:3429
+482:2:3430
+483:2:3431
+484:2:3429
+485:2:3430
+486:2:3431
+487:2:3442
+488:2:3449
+489:0:4463
+490:2:3128
+491:0:4463
+492:2:3453
+493:2:3454
+494:2:3455
+495:2:3467
+496:2:3468
+497:2:3472
+498:2:3473
+499:2:3481
+500:2:3486
+501:2:3490
+502:2:3491
+503:2:3499
+504:2:3500
+505:2:3504
+506:2:3505
+507:2:3499
+508:2:3500
+509:2:3504
+510:2:3505
+511:2:3513
+512:2:3518
+513:2:3519
+514:2:3530
+515:2:3531
+516:2:3532
+517:2:3543
+518:2:3548
+519:2:3549
+520:2:3560
+521:2:3561
+522:2:3562
+523:2:3560
+524:2:3561
+525:2:3562
+526:2:3573
+527:2:3584
+528:2:3585
+529:0:4463
+530:2:3128
+531:0:4463
+532:2:3592
+533:2:3593
+534:2:3597
+535:2:3598
+536:2:3606
+537:2:3607
+538:2:3611
+539:2:3612
+540:2:3620
+541:2:3625
+542:2:3629
+543:2:3630
+544:2:3638
+545:2:3639
+546:2:3643
+547:2:3644
+548:2:3638
+549:2:3639
+550:2:3643
+551:2:3644
+552:2:3652
+553:2:3657
+554:2:3658
+555:2:3669
+556:2:3670
+557:2:3671
+558:2:3682
+559:2:3687
+560:2:3688
+561:2:3699
+562:2:3700
+563:2:3701
+564:2:3699
+565:2:3700
+566:2:3701
+567:2:3712
+568:0:4463
+569:2:3128
+570:0:4463
+571:2:3721
+572:2:3722
+573:2:3726
+574:2:3727
+575:2:3735
+576:2:3736
+577:2:3740
+578:2:3741
+579:2:3749
+580:2:3754
+581:2:3758
+582:2:3759
+583:2:3767
+584:2:3768
+585:2:3772
+586:2:3773
+587:2:3767
+588:2:3768
+589:2:3772
+590:2:3773
+591:2:3781
+592:2:3786
+593:2:3787
+594:2:3798
+595:2:3799
+596:2:3800
+597:2:3811
+598:2:3816
+599:2:3817
+600:2:3828
+601:2:3829
+602:2:3830
+603:2:3828
+604:2:3829
+605:2:3830
+606:2:3841
+607:2:3848
+608:0:4463
+609:2:3128
+610:0:4463
+611:2:3852
+612:2:3853
+613:2:3854
+614:2:3866
+615:2:3867
+616:2:3871
+617:2:3872
+618:2:3880
+619:2:3885
+620:2:3889
+621:2:3890
+622:2:3898
+623:2:3899
+624:2:3903
+625:2:3904
+626:2:3898
+627:2:3899
+628:2:3903
+629:2:3904
+630:2:3912
+631:2:3917
+632:2:3918
+633:2:3929
+634:2:3930
+635:2:3931
+636:2:3942
+637:2:3947
+638:2:3948
+639:2:3959
+640:2:3960
+641:2:3961
+642:2:3959
+643:2:3960
+644:2:3961
+645:2:3972
+646:2:3982
+647:2:3983
+648:0:4463
+649:2:3128
+650:0:4463
+651:2:3989
+652:0:4463
+653:2:4293
+654:2:4294
+655:2:4298
+656:2:4302
+657:2:4303
+658:2:4307
+659:2:4315
+660:2:4316
+661:2:4320
+662:2:4324
+663:2:4325
+664:2:4320
+665:2:4324
+666:2:4325
+667:2:4329
+668:2:4336
+669:2:4343
+670:2:4344
+671:2:4351
+672:2:4356
+673:2:4363
+674:2:4364
+675:2:4363
+676:2:4364
+677:2:4371
+678:2:4375
+679:0:4463
+680:2:3991
+681:2:4001
+682:0:4463
+683:2:3128
+684:0:4463
+685:2:3992
+686:2:3993
+687:0:4463
+688:2:3128
+689:0:4463
+690:2:3997
+691:0:4463
+692:2:4005
+693:0:4463
+694:2:3124
+695:0:4463
+696:2:3126
+697:0:4463
+698:2:3127
+699:0:4463
+700:2:3128
+701:0:4463
+702:2:3129
+703:2:3130
+704:2:3134
+705:2:3135
+706:2:3143
+707:2:3144
+708:2:3148
+709:2:3149
+710:2:3157
+711:2:3162
+712:2:3166
+713:2:3167
+714:2:3175
+715:2:3176
+716:2:3180
+717:2:3181
+718:2:3175
+719:2:3176
+720:2:3177
+721:2:3189
+722:2:3194
+723:2:3195
+724:2:3206
+725:2:3207
+726:2:3208
+727:2:3219
+728:2:3224
+729:2:3225
+730:2:3236
+731:2:3237
+732:2:3238
+733:2:3236
+734:2:3237
+735:2:3238
+736:2:3249
+737:2:3257
+738:0:4463
+739:2:3128
+740:0:4463
+741:2:3309
+742:2:3310
+743:2:3311
+744:0:4463
+745:2:3128
+746:0:4463
+747:2:3316
+748:0:4463
+749:1:2
+750:0:4463
+751:1:8
+752:0:4463
+753:1:9
+754:0:4463
+755:1:10
+756:0:4463
+757:1:11
+758:0:4463
+759:1:12
+760:1:13
+761:1:17
+762:1:18
+763:1:26
+764:1:27
+765:1:31
+766:1:32
+767:1:40
+768:1:45
+769:1:49
+770:1:50
+771:1:58
+772:1:59
+773:1:63
+774:1:64
+775:1:58
+776:1:59
+777:1:63
+778:1:64
+779:1:72
+780:1:77
+781:1:78
+782:1:89
+783:1:90
+784:1:91
+785:1:102
+786:1:107
+787:1:108
+788:1:119
+789:1:120
+790:1:121
+791:1:119
+792:1:120
+793:1:121
+794:1:132
+795:0:4463
+796:1:11
+797:0:4463
+798:1:141
+799:1:142
+800:0:4463
+801:1:11
+802:0:4463
+803:1:148
+804:1:149
+805:1:153
+806:1:154
+807:1:162
+808:1:163
+809:1:167
+810:1:168
+811:1:176
+812:1:181
+813:1:185
+814:1:186
+815:1:194
+816:1:195
+817:1:199
+818:1:200
+819:1:194
+820:1:195
+821:1:199
+822:1:200
+823:1:208
+824:1:213
+825:1:214
+826:1:225
+827:1:226
+828:1:227
+829:1:238
+830:1:243
+831:1:244
+832:1:255
+833:1:256
+834:1:257
+835:1:255
+836:1:256
+837:1:257
+838:1:268
+839:0:4463
+840:1:11
+841:0:4463
+842:1:277
+843:1:278
+844:1:282
+845:1:283
+846:1:291
+847:1:292
+848:1:296
+849:1:297
+850:1:305
+851:1:310
+852:1:314
+853:1:315
+854:1:323
+855:1:324
+856:1:328
+857:1:329
+858:1:323
+859:1:324
+860:1:328
+861:1:329
+862:1:337
+863:1:342
+864:1:343
+865:1:354
+866:1:355
+867:1:356
+868:1:367
+869:1:372
+870:1:373
+871:1:384
+872:1:385
+873:1:386
+874:1:384
+875:1:385
+876:1:386
+877:1:397
+878:1:404
+879:0:4463
+880:1:11
+881:0:4463
+882:1:540
+883:1:544
+884:1:545
+885:1:549
+886:1:550
+887:1:558
+888:1:566
+889:1:567
+890:1:571
+891:1:575
+892:1:576
+893:1:571
+894:1:575
+895:1:576
+896:1:580
+897:1:587
+898:1:594
+899:1:595
+900:1:602
+901:1:607
+902:1:614
+903:1:615
+904:1:614
+905:1:615
+906:1:622
+907:0:4463
+908:1:11
+909:0:4463
+910:1:632
+911:1:633
+912:1:637
+913:1:638
+914:1:646
+915:1:647
+916:1:651
+917:1:652
+918:1:660
+919:1:665
+920:1:669
+921:1:670
+922:1:678
+923:1:679
+924:1:683
+925:1:684
+926:1:678
+927:1:679
+928:1:683
+929:1:684
+930:1:692
+931:1:697
+932:1:698
+933:1:709
+934:1:710
+935:1:711
+936:1:722
+937:1:727
+938:1:728
+939:1:739
+940:1:740
+941:1:741
+942:1:739
+943:1:740
+944:1:741
+945:1:752
+946:0:4463
+947:1:11
+948:0:4463
+949:1:761
+950:1:764
+951:1:765
+952:0:4463
+953:1:11
+954:0:4463
+955:1:768
+956:1:769
+957:1:773
+958:1:774
+959:1:782
+960:1:783
+961:1:787
+962:1:788
+963:1:796
+964:1:801
+965:1:805
+966:1:806
+967:1:814
+968:1:815
+969:1:819
+970:1:820
+971:1:814
+972:1:815
+973:1:819
+974:1:820
+975:1:828
+976:1:833
+977:1:834
+978:1:845
+979:1:846
+980:1:847
+981:1:858
+982:1:863
+983:1:864
+984:1:875
+985:1:876
+986:1:877
+987:1:875
+988:1:876
+989:1:877
+990:1:888
+991:0:4463
+992:1:11
+993:0:4463
+994:1:1028
+995:1:1029
+996:1:1033
+997:1:1034
+998:1:1042
+999:1:1043
+1000:1:1047
+1001:1:1048
+1002:1:1056
+1003:1:1061
+1004:1:1065
+1005:1:1066
+1006:1:1074
+1007:1:1075
+1008:1:1079
+1009:1:1080
+1010:1:1074
+1011:1:1075
+1012:1:1079
+1013:1:1080
+1014:1:1088
+1015:1:1093
+1016:1:1094
+1017:1:1105
+1018:1:1106
+1019:1:1107
+1020:1:1118
+1021:1:1123
+1022:1:1124
+1023:1:1135
+1024:1:1136
+1025:1:1137
+1026:1:1135
+1027:1:1136
+1028:1:1137
+1029:1:1148
+1030:1:1155
+1031:1:1159
+1032:0:4463
+1033:1:11
+1034:0:4463
+1035:1:1160
+1036:1:1161
+1037:1:1165
+1038:1:1166
+1039:1:1174
+1040:1:1175
+1041:1:1176
+1042:1:1188
+1043:1:1193
+1044:1:1197
+1045:1:1198
+1046:1:1206
+1047:1:1207
+1048:1:1211
+1049:1:1212
+1050:1:1206
+1051:1:1207
+1052:1:1211
+1053:1:1212
+1054:1:1220
+1055:1:1225
+1056:1:1226
+1057:1:1237
+1058:1:1238
+1059:1:1239
+1060:1:1250
+1061:1:1255
+1062:1:1256
+1063:1:1267
+1064:1:1268
+1065:1:1269
+1066:1:1267
+1067:1:1268
+1068:1:1269
+1069:1:1280
+1070:0:4463
+1071:1:11
+1072:0:4463
+1073:1:1289
+1074:0:4463
+1075:1:3023
+1076:1:3030
+1077:1:3031
+1078:1:3038
+1079:1:3043
+1080:1:3050
+1081:1:3051
+1082:1:3050
+1083:1:3051
+1084:1:3058
+1085:1:3062
+1086:0:4463
+1087:2:4020
+1088:2:4021
+1089:2:4025
+1090:2:4029
+1091:2:4030
+1092:2:4034
+1093:2:4039
+1094:2:4047
+1095:2:4051
+1096:2:4052
+1097:2:4047
+1098:2:4048
+1099:2:4056
+1100:2:4063
+1101:2:4070
+1102:2:4071
+1103:2:4078
+1104:2:4083
+1105:2:4090
+1106:2:4091
+1107:2:4090
+1108:2:4091
+1109:2:4098
+1110:2:4102
+1111:0:4463
+1112:2:3318
+1113:2:4001
+1114:0:4463
+1115:2:3128
+1116:0:4463
+1117:2:3319
+1118:0:4463
+1119:2:3128
+1120:0:4463
+1121:2:3322
+1122:2:3323
+1123:2:3327
+1124:2:3328
+1125:2:3336
+1126:2:3337
+1127:2:3341
+1128:2:3342
+1129:2:3350
+1130:2:3355
+1131:2:3359
+1132:2:3360
+1133:2:3368
+1134:2:3369
+1135:2:3373
+1136:2:3374
+1137:2:3368
+1138:2:3369
+1139:2:3373
+1140:2:3374
+1141:2:3382
+1142:2:3387
+1143:2:3388
+1144:2:3399
+1145:2:3400
+1146:2:3401
+1147:2:3412
+1148:2:3417
+1149:2:3418
+1150:2:3429
+1151:2:3430
+1152:2:3431
+1153:2:3429
+1154:2:3430
+1155:2:3431
+1156:2:3442
+1157:2:3449
+1158:0:4463
+1159:2:3128
+1160:0:4463
+1161:2:3453
+1162:2:3454
+1163:2:3455
+1164:2:3467
+1165:2:3468
+1166:2:3472
+1167:2:3473
+1168:2:3481
+1169:2:3486
+1170:2:3490
+1171:2:3491
+1172:2:3499
+1173:2:3500
+1174:2:3504
+1175:2:3505
+1176:2:3499
+1177:2:3500
+1178:2:3504
+1179:2:3505
+1180:2:3513
+1181:2:3518
+1182:2:3519
+1183:2:3530
+1184:2:3531
+1185:2:3532
+1186:2:3543
+1187:2:3548
+1188:2:3549
+1189:2:3560
+1190:2:3561
+1191:2:3562
+1192:2:3560
+1193:2:3561
+1194:2:3562
+1195:2:3573
+1196:2:3582
+1197:0:4463
+1198:2:3128
+1199:0:4463
+1200:2:3588
+1201:0:4463
+1202:2:4111
+1203:2:4112
+1204:2:4116
+1205:2:4120
+1206:2:4121
+1207:2:4125
+1208:2:4133
+1209:2:4134
+1210:2:4138
+1211:2:4142
+1212:2:4143
+1213:2:4138
+1214:2:4142
+1215:2:4143
+1216:2:4147
+1217:2:4154
+1218:2:4161
+1219:2:4162
+1220:2:4169
+1221:2:4174
+1222:2:4181
+1223:2:4182
+1224:2:4181
+1225:2:4182
+1226:2:4189
+1227:2:4193
+1228:0:4463
+1229:2:3590
+1230:2:3591
+1231:0:4463
+1232:2:3128
+1233:0:4463
+1234:2:3592
+1235:2:3593
+1236:2:3597
+1237:2:3598
+1238:2:3606
+1239:2:3607
+1240:2:3611
+1241:2:3612
+1242:2:3620
+1243:2:3625
+1244:2:3629
+1245:2:3630
+1246:2:3638
+1247:2:3639
+1248:2:3643
+1249:2:3644
+1250:2:3638
+1251:2:3639
+1252:2:3643
+1253:2:3644
+1254:2:3652
+1255:2:3657
+1256:2:3658
+1257:2:3669
+1258:2:3670
+1259:2:3671
+1260:2:3682
+1261:2:3687
+1262:2:3688
+1263:2:3699
+1264:2:3700
+1265:2:3701
+1266:2:3699
+1267:2:3700
+1268:2:3701
+1269:2:3712
+1270:0:4463
+1271:2:3128
+1272:0:4463
+1273:2:3453
+1274:2:3454
+1275:2:3458
+1276:2:3459
+1277:2:3467
+1278:2:3468
+1279:2:3472
+1280:2:3473
+1281:2:3481
+1282:2:3486
+1283:2:3490
+1284:2:3491
+1285:2:3499
+1286:2:3500
+1287:2:3504
+1288:2:3505
+1289:2:3499
+1290:2:3500
+1291:2:3504
+1292:2:3505
+1293:2:3513
+1294:2:3518
+1295:2:3519
+1296:2:3530
+1297:2:3531
+1298:2:3532
+1299:2:3543
+1300:2:3548
+1301:2:3549
+1302:2:3560
+1303:2:3561
+1304:2:3562
+1305:2:3560
+1306:2:3561
+1307:2:3562
+1308:2:3573
+1309:2:3582
+1310:0:4463
+1311:2:3128
+1312:0:4463
+1313:2:3588
+1314:0:4463
+1315:2:4111
+1316:2:4112
+1317:2:4116
+1318:2:4120
+1319:2:4121
+1320:2:4125
+1321:2:4133
+1322:2:4134
+1323:2:4138
+1324:2:4142
+1325:2:4143
+1326:2:4138
+1327:2:4142
+1328:2:4143
+1329:2:4147
+1330:2:4154
+1331:2:4161
+1332:2:4162
+1333:2:4169
+1334:2:4174
+1335:2:4181
+1336:2:4182
+1337:2:4181
+1338:2:4182
+1339:2:4189
+1340:2:4193
+1341:0:4463
+1342:2:3590
+1343:2:3591
+1344:0:4463
+1345:2:3128
+1346:0:4463
+1347:2:3453
+1348:2:3454
+1349:2:3458
+1350:2:3459
+1351:2:3467
+1352:2:3468
+1353:2:3472
+1354:2:3473
+1355:2:3481
+1356:2:3486
+1357:2:3490
+1358:2:3491
+1359:2:3499
+1360:2:3500
+1361:2:3504
+1362:2:3505
+1363:2:3499
+1364:2:3500
+1365:2:3504
+1366:2:3505
+1367:2:3513
+1368:2:3518
+1369:2:3519
+1370:2:3530
+1371:2:3531
+1372:2:3532
+1373:2:3543
+1374:2:3548
+1375:2:3549
+1376:2:3560
+1377:2:3561
+1378:2:3562
+1379:2:3560
+1380:2:3561
+1381:2:3562
+1382:2:3573
+1383:2:3582
+1384:0:4463
+1385:2:3128
+1386:0:4463
+1387:2:3588
+1388:0:4463
+1389:2:4111
+1390:2:4112
+1391:2:4116
+1392:2:4120
+1393:2:4121
+1394:2:4125
+1395:2:4133
+1396:2:4134
+1397:2:4138
+1398:2:4142
+1399:2:4143
+1400:2:4138
+1401:2:4142
+1402:2:4143
+1403:2:4147
+1404:2:4154
+1405:2:4161
+1406:2:4162
+1407:2:4169
+1408:2:4174
+1409:2:4181
+1410:2:4182
+1411:2:4181
+1412:2:4182
+1413:2:4189
+1414:2:4193
+1415:0:4463
+1416:1:1291
+1417:1:1292
+1418:0:4461
+1419:1:11
+1420:0:4467
+1421:1:2299
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.log
new file mode 100644 (file)
index 0000000..90b0d78
--- /dev/null
@@ -0,0 +1,632 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_single_flip.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    5164 States=    1e+06 Transitions= 4.75e+08 Memory=   550.432        t=    582 R=   2e+03
+Depth=    5746 States=    2e+06 Transitions= 1.02e+09 Memory=   634.318        t= 1.27e+03 R=   2e+03
+pan: claim violated! (at depth 1144)
+pan: wrote .input.spin.trail
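Note: the .input.spin.trail counterexample written above can be replayed step by step against the generated model, typically with something along the lines of "spin -t -p -g -l -N pan.ltl .input.spin", which prints each executed statement together with global and local variable values (exact flags depend on the Spin version in use).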
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5746, errors: 1
+  2322493 states, stored
+1.1944827e+09 states, matched
+1.1968052e+09 transitions (= stored+matched)
+6.6651798e+09 atomic steps
+hash conflicts: 7.6855101e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  256.929      equivalent memory usage for states (stored*(State-vector + overhead))
+  195.871      actual memory usage for states (compression: 76.24%)
+               state-vector as stored = 60 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  661.467      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 82, "(1)"
+       line 438, "pan.___", state 112, "(1)"
+       line 442, "pan.___", state 125, "(1)"
+       line 597, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 218, "(1)"
+       line 438, "pan.___", state 248, "(1)"
+       line 442, "pan.___", state 261, "(1)"
+       line 411, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 347, "(1)"
+       line 438, "pan.___", state 377, "(1)"
+       line 442, "pan.___", state 390, "(1)"
+       line 411, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 415, "(1)"
+       line 411, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 416, "else"
+       line 411, "pan.___", state 419, "(1)"
+       line 415, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 429, "(1)"
+       line 415, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 430, "else"
+       line 415, "pan.___", state 433, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 413, "pan.___", state 439, "((i<1))"
+       line 413, "pan.___", state 439, "((i>=1))"
+       line 420, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 447, "(1)"
+       line 420, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 448, "else"
+       line 420, "pan.___", state 451, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 424, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 461, "(1)"
+       line 424, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 462, "else"
+       line 424, "pan.___", state 465, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 422, "pan.___", state 471, "((i<2))"
+       line 422, "pan.___", state 471, "((i>=2))"
+       line 429, "pan.___", state 478, "(1)"
+       line 429, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 479, "else"
+       line 429, "pan.___", state 482, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 433, "pan.___", state 491, "(1)"
+       line 433, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 492, "else"
+       line 433, "pan.___", state 495, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 431, "pan.___", state 501, "((i<1))"
+       line 431, "pan.___", state 501, "((i>=1))"
+       line 438, "pan.___", state 508, "(1)"
+       line 438, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 509, "else"
+       line 438, "pan.___", state 512, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 442, "pan.___", state 521, "(1)"
+       line 442, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 522, "else"
+       line 442, "pan.___", state 525, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 440, "pan.___", state 531, "((i<2))"
+       line 440, "pan.___", state 531, "((i>=2))"
+       line 450, "pan.___", state 535, "(1)"
+       line 450, "pan.___", state 535, "(1)"
+       line 597, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 540, "(1)"
+       line 272, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 555, "(1)"
+       line 280, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 591, "(1)"
+       line 253, "pan.___", state 599, "(1)"
+       line 257, "pan.___", state 611, "(1)"
+       line 261, "pan.___", state 619, "(1)"
+       line 411, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 702, "(1)"
+       line 433, "pan.___", state 715, "(1)"
+       line 438, "pan.___", state 732, "(1)"
+       line 442, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 838, "(1)"
+       line 438, "pan.___", state 868, "(1)"
+       line 442, "pan.___", state 881, "(1)"
+       line 411, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 904, "(1)"
+       line 411, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 905, "else"
+       line 411, "pan.___", state 908, "(1)"
+       line 415, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 918, "(1)"
+       line 415, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 919, "else"
+       line 415, "pan.___", state 922, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 413, "pan.___", state 928, "((i<1))"
+       line 413, "pan.___", state 928, "((i>=1))"
+       line 420, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 936, "(1)"
+       line 420, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 937, "else"
+       line 420, "pan.___", state 940, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 424, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 950, "(1)"
+       line 424, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 951, "else"
+       line 424, "pan.___", state 954, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 422, "pan.___", state 960, "((i<2))"
+       line 422, "pan.___", state 960, "((i>=2))"
+       line 429, "pan.___", state 967, "(1)"
+       line 429, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 968, "else"
+       line 429, "pan.___", state 971, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 433, "pan.___", state 980, "(1)"
+       line 433, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 981, "else"
+       line 433, "pan.___", state 984, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 431, "pan.___", state 990, "((i<1))"
+       line 431, "pan.___", state 990, "((i>=1))"
+       line 438, "pan.___", state 997, "(1)"
+       line 438, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 998, "else"
+       line 438, "pan.___", state 1001, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 442, "pan.___", state 1010, "(1)"
+       line 442, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 1011, "else"
+       line 442, "pan.___", state 1014, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 440, "pan.___", state 1020, "((i<2))"
+       line 440, "pan.___", state 1020, "((i>=2))"
+       line 450, "pan.___", state 1024, "(1)"
+       line 450, "pan.___", state 1024, "(1)"
+       line 605, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1098, "(1)"
+       line 433, "pan.___", state 1111, "(1)"
+       line 438, "pan.___", state 1128, "(1)"
+       line 442, "pan.___", state 1141, "(1)"
+       line 411, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1230, "(1)"
+       line 438, "pan.___", state 1260, "(1)"
+       line 442, "pan.___", state 1273, "(1)"
+       line 411, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1363, "(1)"
+       line 438, "pan.___", state 1393, "(1)"
+       line 442, "pan.___", state 1406, "(1)"
+       line 411, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1492, "(1)"
+       line 438, "pan.___", state 1522, "(1)"
+       line 442, "pan.___", state 1535, "(1)"
+       line 272, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1605, "(1)"
+       line 253, "pan.___", state 1613, "(1)"
+       line 257, "pan.___", state 1625, "(1)"
+       line 261, "pan.___", state 1633, "(1)"
+       line 411, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1716, "(1)"
+       line 433, "pan.___", state 1729, "(1)"
+       line 438, "pan.___", state 1746, "(1)"
+       line 442, "pan.___", state 1759, "(1)"
+       line 411, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1845, "(1)"
+       line 433, "pan.___", state 1858, "(1)"
+       line 438, "pan.___", state 1875, "(1)"
+       line 442, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1977, "(1)"
+       line 438, "pan.___", state 2007, "(1)"
+       line 442, "pan.___", state 2020, "(1)"
+       line 644, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2113, "(1)"
+       line 438, "pan.___", state 2143, "(1)"
+       line 442, "pan.___", state 2156, "(1)"
+       line 411, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2242, "(1)"
+       line 438, "pan.___", state 2272, "(1)"
+       line 442, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2310, "(1)"
+       line 411, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2311, "else"
+       line 411, "pan.___", state 2314, "(1)"
+       line 415, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2324, "(1)"
+       line 415, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2325, "else"
+       line 415, "pan.___", state 2328, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 413, "pan.___", state 2334, "((i<1))"
+       line 413, "pan.___", state 2334, "((i>=1))"
+       line 420, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2342, "(1)"
+       line 420, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2343, "else"
+       line 420, "pan.___", state 2346, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 424, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2356, "(1)"
+       line 424, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2357, "else"
+       line 424, "pan.___", state 2360, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 422, "pan.___", state 2366, "((i<2))"
+       line 422, "pan.___", state 2366, "((i>=2))"
+       line 429, "pan.___", state 2373, "(1)"
+       line 429, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 2374, "else"
+       line 429, "pan.___", state 2377, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 433, "pan.___", state 2386, "(1)"
+       line 433, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 2387, "else"
+       line 433, "pan.___", state 2390, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 431, "pan.___", state 2396, "((i<1))"
+       line 431, "pan.___", state 2396, "((i>=1))"
+       line 438, "pan.___", state 2403, "(1)"
+       line 438, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 2404, "else"
+       line 438, "pan.___", state 2407, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 442, "pan.___", state 2416, "(1)"
+       line 442, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 2417, "else"
+       line 442, "pan.___", state 2420, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 440, "pan.___", state 2426, "((i<2))"
+       line 440, "pan.___", state 2426, "((i>=2))"
+       line 450, "pan.___", state 2430, "(1)"
+       line 450, "pan.___", state 2430, "(1)"
+       line 644, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2435, "(1)"
+       line 272, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2486, "(1)"
+       line 253, "pan.___", state 2494, "(1)"
+       line 257, "pan.___", state 2506, "(1)"
+       line 261, "pan.___", state 2514, "(1)"
+       line 411, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2597, "(1)"
+       line 433, "pan.___", state 2610, "(1)"
+       line 438, "pan.___", state 2627, "(1)"
+       line 442, "pan.___", state 2640, "(1)"
+       line 272, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2711, "(1)"
+       line 253, "pan.___", state 2719, "(1)"
+       line 257, "pan.___", state 2731, "(1)"
+       line 261, "pan.___", state 2739, "(1)"
+       line 411, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2822, "(1)"
+       line 433, "pan.___", state 2835, "(1)"
+       line 438, "pan.___", state 2852, "(1)"
+       line 442, "pan.___", state 2865, "(1)"
+       line 411, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2951, "(1)"
+       line 433, "pan.___", state 2964, "(1)"
+       line 438, "pan.___", state 2981, "(1)"
+       line 442, "pan.___", state 2994, "(1)"
+       line 249, "pan.___", state 3027, "(1)"
+       line 257, "pan.___", state 3047, "(1)"
+       line 261, "pan.___", state 3055, "(1)"
+       line 249, "pan.___", state 3070, "(1)"
+       line 253, "pan.___", state 3078, "(1)"
+       line 257, "pan.___", state 3090, "(1)"
+       line 261, "pan.___", state 3098, "(1)"
+       line 898, "pan.___", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 22, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 36, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 54, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 87, "(1)"
+       line 433, "pan.___", state 100, "(1)"
+       line 438, "pan.___", state 117, "(1)"
+       line 272, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 162, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 175, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 215, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 229, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 247, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 261, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 280, "(1)"
+       line 433, "pan.___", state 293, "(1)"
+       line 438, "pan.___", state 310, "(1)"
+       line 442, "pan.___", state 323, "(1)"
+       line 415, "pan.___", state 360, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 378, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 392, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 424, "(1)"
+       line 438, "pan.___", state 441, "(1)"
+       line 442, "pan.___", state 454, "(1)"
+       line 411, "pan.___", state 484, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 498, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 516, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 530, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 549, "(1)"
+       line 433, "pan.___", state 562, "(1)"
+       line 438, "pan.___", state 579, "(1)"
+       line 442, "pan.___", state 592, "(1)"
+       line 411, "pan.___", state 613, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 615, "(1)"
+       line 411, "pan.___", state 616, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 616, "else"
+       line 411, "pan.___", state 619, "(1)"
+       line 415, "pan.___", state 627, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 629, "(1)"
+       line 415, "pan.___", state 630, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 630, "else"
+       line 415, "pan.___", state 633, "(1)"
+       line 415, "pan.___", state 634, "(1)"
+       line 415, "pan.___", state 634, "(1)"
+       line 413, "pan.___", state 639, "((i<1))"
+       line 413, "pan.___", state 639, "((i>=1))"
+       line 420, "pan.___", state 645, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 647, "(1)"
+       line 420, "pan.___", state 648, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 648, "else"
+       line 420, "pan.___", state 651, "(1)"
+       line 420, "pan.___", state 652, "(1)"
+       line 420, "pan.___", state 652, "(1)"
+       line 424, "pan.___", state 659, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 661, "(1)"
+       line 424, "pan.___", state 662, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 662, "else"
+       line 424, "pan.___", state 665, "(1)"
+       line 424, "pan.___", state 666, "(1)"
+       line 424, "pan.___", state 666, "(1)"
+       line 422, "pan.___", state 671, "((i<2))"
+       line 422, "pan.___", state 671, "((i>=2))"
+       line 429, "pan.___", state 678, "(1)"
+       line 429, "pan.___", state 679, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 679, "else"
+       line 429, "pan.___", state 682, "(1)"
+       line 429, "pan.___", state 683, "(1)"
+       line 429, "pan.___", state 683, "(1)"
+       line 433, "pan.___", state 691, "(1)"
+       line 433, "pan.___", state 692, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 692, "else"
+       line 433, "pan.___", state 695, "(1)"
+       line 433, "pan.___", state 696, "(1)"
+       line 433, "pan.___", state 696, "(1)"
+       line 431, "pan.___", state 701, "((i<1))"
+       line 431, "pan.___", state 701, "((i>=1))"
+       line 438, "pan.___", state 708, "(1)"
+       line 438, "pan.___", state 709, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 709, "else"
+       line 438, "pan.___", state 712, "(1)"
+       line 438, "pan.___", state 713, "(1)"
+       line 438, "pan.___", state 713, "(1)"
+       line 442, "pan.___", state 721, "(1)"
+       line 442, "pan.___", state 722, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 722, "else"
+       line 442, "pan.___", state 725, "(1)"
+       line 442, "pan.___", state 726, "(1)"
+       line 442, "pan.___", state 726, "(1)"
+       line 440, "pan.___", state 731, "((i<2))"
+       line 440, "pan.___", state 731, "((i>=2))"
+       line 450, "pan.___", state 735, "(1)"
+       line 450, "pan.___", state 735, "(1)"
+       line 1117, "pan.___", state 739, "_proc_urcu_writer = (_proc_urcu_writer|(1<<10))"
+       line 411, "pan.___", state 744, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 746, "(1)"
+       line 411, "pan.___", state 747, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 747, "else"
+       line 411, "pan.___", state 750, "(1)"
+       line 415, "pan.___", state 758, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 760, "(1)"
+       line 415, "pan.___", state 761, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 761, "else"
+       line 415, "pan.___", state 764, "(1)"
+       line 415, "pan.___", state 765, "(1)"
+       line 415, "pan.___", state 765, "(1)"
+       line 413, "pan.___", state 770, "((i<1))"
+       line 413, "pan.___", state 770, "((i>=1))"
+       line 420, "pan.___", state 776, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 778, "(1)"
+       line 420, "pan.___", state 779, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 779, "else"
+       line 420, "pan.___", state 782, "(1)"
+       line 420, "pan.___", state 783, "(1)"
+       line 420, "pan.___", state 783, "(1)"
+       line 424, "pan.___", state 790, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 792, "(1)"
+       line 424, "pan.___", state 793, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 793, "else"
+       line 424, "pan.___", state 796, "(1)"
+       line 424, "pan.___", state 797, "(1)"
+       line 424, "pan.___", state 797, "(1)"
+       line 422, "pan.___", state 802, "((i<2))"
+       line 422, "pan.___", state 802, "((i>=2))"
+       line 429, "pan.___", state 809, "(1)"
+       line 429, "pan.___", state 810, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 810, "else"
+       line 429, "pan.___", state 813, "(1)"
+       line 429, "pan.___", state 814, "(1)"
+       line 429, "pan.___", state 814, "(1)"
+       line 433, "pan.___", state 822, "(1)"
+       line 433, "pan.___", state 823, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 823, "else"
+       line 433, "pan.___", state 826, "(1)"
+       line 433, "pan.___", state 827, "(1)"
+       line 433, "pan.___", state 827, "(1)"
+       line 431, "pan.___", state 832, "((i<1))"
+       line 431, "pan.___", state 832, "((i>=1))"
+       line 438, "pan.___", state 839, "(1)"
+       line 438, "pan.___", state 840, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 840, "else"
+       line 438, "pan.___", state 843, "(1)"
+       line 438, "pan.___", state 844, "(1)"
+       line 438, "pan.___", state 844, "(1)"
+       line 442, "pan.___", state 852, "(1)"
+       line 442, "pan.___", state 853, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 853, "else"
+       line 442, "pan.___", state 856, "(1)"
+       line 442, "pan.___", state 857, "(1)"
+       line 442, "pan.___", state 857, "(1)"
+       line 440, "pan.___", state 862, "((i<2))"
+       line 440, "pan.___", state 862, "((i>=2))"
+       line 450, "pan.___", state 866, "(1)"
+       line 450, "pan.___", state 866, "(1)"
+       line 1133, "pan.___", state 871, "_proc_urcu_writer = (_proc_urcu_writer|(1<<11))"
+       line 1128, "pan.___", state 872, "(((tmp2&((1<<7)-1))&&((tmp2^0)&(1<<7))))"
+       line 1128, "pan.___", state 872, "else"
+       line 1153, "pan.___", state 876, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<12)|(1<<11))))"
+       line 272, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 931, "(1)"
+       line 284, "pan.___", state 938, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 954, "(1)"
+       line 253, "pan.___", state 962, "(1)"
+       line 257, "pan.___", state 974, "(1)"
+       line 261, "pan.___", state 982, "(1)"
+       line 276, "pan.___", state 1007, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1020, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1029, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1045, "(1)"
+       line 253, "pan.___", state 1053, "(1)"
+       line 257, "pan.___", state 1065, "(1)"
+       line 261, "pan.___", state 1073, "(1)"
+       line 272, "pan.___", state 1089, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1091, "(1)"
+       line 276, "pan.___", state 1098, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1100, "(1)"
+       line 276, "pan.___", state 1101, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1101, "else"
+       line 274, "pan.___", state 1106, "((i<1))"
+       line 274, "pan.___", state 1106, "((i>=1))"
+       line 280, "pan.___", state 1111, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1113, "(1)"
+       line 280, "pan.___", state 1114, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1114, "else"
+       line 284, "pan.___", state 1120, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1122, "(1)"
+       line 284, "pan.___", state 1123, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1123, "else"
+       line 282, "pan.___", state 1128, "((i<2))"
+       line 282, "pan.___", state 1128, "((i>=2))"
+       line 249, "pan.___", state 1136, "(1)"
+       line 253, "pan.___", state 1144, "(1)"
+       line 253, "pan.___", state 1145, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1145, "else"
+       line 251, "pan.___", state 1150, "((i<1))"
+       line 251, "pan.___", state 1150, "((i>=1))"
+       line 257, "pan.___", state 1156, "(1)"
+       line 257, "pan.___", state 1157, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1157, "else"
+       line 261, "pan.___", state 1164, "(1)"
+       line 261, "pan.___", state 1165, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1165, "else"
+       line 266, "pan.___", state 1174, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1174, "else"
+       line 299, "pan.___", state 1176, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1176, "else"
+       line 276, "pan.___", state 1189, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1202, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1227, "(1)"
+       line 253, "pan.___", state 1235, "(1)"
+       line 257, "pan.___", state 1247, "(1)"
+       line 261, "pan.___", state 1255, "(1)"
+       line 1237, "pan.___", state 1270, "-end-"
+       (161 of 1270 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 1.49e+03 seconds
+pan: rate 1556.4638 states/second
+pan: avg transition delay 1.2468e-06 usec
+cp .input.spin urcu_free_single_flip.spin.input
+cp .input.spin.trail urcu_free_single_flip.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.spin.input
new file mode 100644 (file)
index 0000000..38db318
--- /dev/null
@@ -0,0 +1,1273 @@
+#define SINGLE_FLIP
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
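+/*
+ * Assumed layout: the low-order bits (RCU_GP_CTR_NEST_MASK) count read-side
+ * nesting depth, while RCU_GP_CTR_BIT flags the current grace-period phase.
+ */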
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
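+/*
+ * Minimal usage sketch of the token scheme above (illustration only; the
+ * token names STMT_A_DONE/STMT_B_DONE and variables flow, x, y are
+ * hypothetical and do not appear in the model below):
+ *
+ *     #define STMT_A_DONE    (1 << 0)
+ *     #define STMT_B_DONE    (1 << 1)
+ *
+ *     do
+ *     :: CONSUME_TOKENS(flow, 0, STMT_A_DONE) ->
+ *             tmp = x;                                (statement A)
+ *             PRODUCE_TOKENS(flow, STMT_A_DONE);
+ *     :: CONSUME_TOKENS(flow, STMT_A_DONE, STMT_B_DONE) ->
+ *             y = tmp;                                (statement B, RAW on A)
+ *             PRODUCE_TOKENS(flow, STMT_B_DONE);
+ *     :: CONSUME_TOKENS(flow, STMT_A_DONE | STMT_B_DONE, 0) ->
+ *             CLEAR_TOKENS(flow, STMT_A_DONE | STMT_B_DONE);
+ *             break
+ *     od;
+ *
+ * Statement B becomes executable only once A has produced its token, so the
+ * model checker explores every instruction interleaving allowed by the
+ * declared dependencies, and no other.
+ */
+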
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
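+
+/*
+ * Small illustrations of the dependency kinds above (hypothetical statements
+ * on plain variables a, b, x, y; not taken from the model itself):
+ *
+ *     a = x; b = a;           RAW : the second statement reads the result
+ *                                   of the first.
+ *     b = a; a = y;           WAR : the write may not overtake the earlier
+ *                                   read of the same variable.
+ *     a = x; a = y;           WAW : two writes to the same variable must
+ *                                   retain their order.
+ *
+ *     if
+ *     :: cond -> a = x;       control dependency : the store only happens
+ *     :: else -> skip;        on one outcome of evaluating "cond".
+ *     fi;
+ */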
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May write this process's cache entry back to memory if it is dirty, or
+ * not, letting other caches later observe the update.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
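+
+/*
+ * Sketch of the intended lifecycle of a cached variable ("foo" and "j" are
+ * placeholder names for this illustration only):
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *     ...
+ *     INIT_CACHED_VAR(foo, 0, j);
+ *     WRITE_CACHED_VAR(foo, 1);               local cache updated and
+ *                                             marked dirty
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     flush to memory, clear the
+ *                                             dirty bit
+ *     CACHE_READ_FROM_MEM(foo, get_pid());    refresh from memory if the
+ *                                             local copy is not dirty
+ *
+ * The RANDOM_* variants above let the model checker decide
+ * nondeterministically when such flushes and refreshes happen, which is what
+ * models the weakly-ordered cache behaviour.
+ */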
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader is modeled. */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
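+/*
+ * Roughly, PROCEDURE_READ_LOCK above is the per-instruction decomposition of
+ * the read-side lock; ignoring the cache and token machinery, it corresponds
+ * to something like:
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;  (outermost nesting)
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;      (nested)
+ */
+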
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
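+/*
+ * Likewise, PROCEDURE_READ_UNLOCK above roughly corresponds to:
+ *
+ *     tmp = urcu_active_readers[id];
+ *     urcu_active_readers[id] = tmp - 1;
+ */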
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
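+/* ((1 << 30) - 1) covers bits 0 through 29, i.e. every token and branch bit above */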
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch to skip it
+                        * in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event the mb()s are removed (execution using a
+        * signal handler to promote barrier() -> smp_mb()), nothing prevents
+        * one loop iteration from spilling its execution into the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
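+
+/*
+ * Rough summary (illustration only) of the writer data-flow modeled below:
+ * WRITE_DATA -> WRITE_PROC_WMB -> WRITE_XCHG_PTR -> WRITE_PROC_FIRST_MB,
+ * then the two GP flips (*_READ_GP / *_WRITE_GP) and their busy-waits
+ * (*_WAIT / *_WAIT_LOOP), then WRITE_PROC_SECOND_MB and finally WRITE_FREE.
+ * The CONSUME_TOKENS() guards allow any interleaving compatible with the
+ * dependencies they declare; in particular the waits may be reordered with
+ * respect to the flips, as noted in the guards themselves.
+ */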
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local copy of the current parity so
+                                * we do not add non-existing dependencies on
+                                * the global GP update. Needed to test the
+                                * single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
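+               /*
+                * Illustration only: producing the second-flip tokens up front
+                * makes their guards below non-executable, so under SINGLE_FLIP
+                * the writer performs only one grace-period flip; this is the
+                * variant exercised by the urcu_free_single_flip runs.
+                */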
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
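+                       /*
+                        * Illustration only: the test above keeps the wait
+                        * looping (WRITE_PROC_FIRST_WAIT_LOOP) while reader 0
+                        * is inside a read-side critical section (nest count
+                        * non-zero) whose snapshot parity differs from
+                        * cur_gp_val; otherwise the wait completes
+                        * (WRITE_PROC_FIRST_WAIT).
+                        */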
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK not
+                * to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
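+       /*
+        * Verification note (illustration only): the progress_writer* and
+        * progress_reader labels are what the weak-fairness search
+        * (./pan -a -f, see the verification logs added by this commit) uses
+        * together with urcu_progress.ltl ("([] <> !np_)") to flag
+        * non-progress cycles.
+        */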
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave the init proctype after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.spin.input.trail b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_free_single_flip.spin.input.trail
new file mode 100644 (file)
index 0000000..fe03394
--- /dev/null
@@ -0,0 +1,1147 @@
+-2:3:-2
+-4:-4:-4
+1:0:4465
+2:3:4385
+3:3:4388
+4:3:4388
+5:3:4391
+6:3:4399
+7:3:4399
+8:3:4402
+9:3:4408
+10:3:4412
+11:3:4412
+12:3:4415
+13:3:4425
+14:3:4433
+15:3:4433
+16:3:4436
+17:3:4442
+18:3:4446
+19:3:4446
+20:3:4449
+21:3:4455
+22:3:4459
+23:3:4460
+24:0:4465
+25:3:4462
+26:0:4465
+27:2:3117
+28:0:4465
+29:2:3123
+30:0:4465
+31:2:3124
+32:0:4465
+33:2:3126
+34:0:4465
+35:2:3127
+36:0:4465
+37:2:3128
+38:0:4465
+39:2:3129
+40:0:4465
+41:2:3130
+42:0:4465
+43:2:3131
+44:0:4465
+45:2:3132
+46:2:3133
+47:2:3137
+48:2:3138
+49:2:3146
+50:2:3147
+51:2:3151
+52:2:3152
+53:2:3160
+54:2:3165
+55:2:3169
+56:2:3170
+57:2:3178
+58:2:3179
+59:2:3183
+60:2:3184
+61:2:3178
+62:2:3179
+63:2:3183
+64:2:3184
+65:2:3192
+66:2:3197
+67:2:3198
+68:2:3209
+69:2:3210
+70:2:3211
+71:2:3222
+72:2:3227
+73:2:3228
+74:2:3239
+75:2:3240
+76:2:3241
+77:2:3239
+78:2:3240
+79:2:3241
+80:2:3252
+81:2:3260
+82:0:4465
+83:2:3131
+84:0:4465
+85:2:3264
+86:2:3268
+87:2:3269
+88:2:3273
+89:2:3277
+90:2:3278
+91:2:3282
+92:2:3290
+93:2:3291
+94:2:3295
+95:2:3299
+96:2:3300
+97:2:3295
+98:2:3296
+99:2:3304
+100:0:4465
+101:2:3131
+102:0:4465
+103:2:3312
+104:2:3313
+105:2:3314
+106:0:4465
+107:2:3131
+108:0:4465
+109:2:3319
+110:0:4465
+111:2:4022
+112:2:4023
+113:2:4027
+114:2:4031
+115:2:4032
+116:2:4036
+117:2:4041
+118:2:4049
+119:2:4053
+120:2:4054
+121:2:4049
+122:2:4053
+123:2:4054
+124:2:4058
+125:2:4065
+126:2:4072
+127:2:4073
+128:2:4080
+129:2:4085
+130:2:4092
+131:2:4093
+132:2:4092
+133:2:4093
+134:2:4100
+135:2:4104
+136:0:4465
+137:2:3321
+138:2:4003
+139:0:4465
+140:2:3131
+141:0:4465
+142:2:3322
+143:0:4465
+144:2:3131
+145:0:4465
+146:2:3325
+147:2:3326
+148:2:3330
+149:2:3331
+150:2:3339
+151:2:3340
+152:2:3344
+153:2:3345
+154:2:3353
+155:2:3358
+156:2:3362
+157:2:3363
+158:2:3371
+159:2:3372
+160:2:3376
+161:2:3377
+162:2:3371
+163:2:3372
+164:2:3376
+165:2:3377
+166:2:3385
+167:2:3390
+168:2:3391
+169:2:3402
+170:2:3403
+171:2:3404
+172:2:3415
+173:2:3420
+174:2:3421
+175:2:3432
+176:2:3433
+177:2:3434
+178:2:3432
+179:2:3433
+180:2:3434
+181:2:3445
+182:2:3452
+183:0:4465
+184:2:3131
+185:0:4465
+186:2:3456
+187:2:3457
+188:2:3458
+189:2:3470
+190:2:3471
+191:2:3475
+192:2:3476
+193:2:3484
+194:2:3489
+195:2:3493
+196:2:3494
+197:2:3502
+198:2:3503
+199:2:3507
+200:2:3508
+201:2:3502
+202:2:3503
+203:2:3507
+204:2:3508
+205:2:3516
+206:2:3521
+207:2:3522
+208:2:3533
+209:2:3534
+210:2:3535
+211:2:3546
+212:2:3551
+213:2:3552
+214:2:3563
+215:2:3564
+216:2:3565
+217:2:3563
+218:2:3564
+219:2:3565
+220:2:3576
+221:2:3586
+222:2:3587
+223:0:4465
+224:2:3131
+225:0:4465
+226:2:3991
+227:0:4465
+228:2:4295
+229:2:4296
+230:2:4300
+231:2:4304
+232:2:4305
+233:2:4309
+234:2:4317
+235:2:4318
+236:2:4322
+237:2:4326
+238:2:4327
+239:2:4322
+240:2:4326
+241:2:4327
+242:2:4331
+243:2:4338
+244:2:4345
+245:2:4346
+246:2:4353
+247:2:4358
+248:2:4365
+249:2:4366
+250:2:4365
+251:2:4366
+252:2:4373
+253:2:4377
+254:0:4465
+255:2:3993
+256:2:4003
+257:0:4465
+258:2:3131
+259:0:4465
+260:2:3994
+261:2:3995
+262:0:4465
+263:2:3131
+264:0:4465
+265:2:3999
+266:0:4465
+267:2:4007
+268:0:4465
+269:2:3124
+270:0:4465
+271:2:3126
+272:0:4465
+273:2:3127
+274:0:4465
+275:2:3128
+276:0:4465
+277:2:3129
+278:0:4465
+279:2:3130
+280:0:4465
+281:2:3131
+282:0:4465
+283:2:3132
+284:2:3133
+285:2:3137
+286:2:3138
+287:2:3146
+288:2:3147
+289:2:3151
+290:2:3152
+291:2:3160
+292:2:3165
+293:2:3169
+294:2:3170
+295:2:3178
+296:2:3179
+297:2:3180
+298:2:3178
+299:2:3179
+300:2:3183
+301:2:3184
+302:2:3192
+303:2:3197
+304:2:3198
+305:2:3209
+306:2:3210
+307:2:3211
+308:2:3222
+309:2:3227
+310:2:3228
+311:2:3239
+312:2:3240
+313:2:3241
+314:2:3239
+315:2:3240
+316:2:3241
+317:2:3252
+318:2:3260
+319:0:4465
+320:2:3131
+321:0:4465
+322:2:3264
+323:2:3268
+324:2:3269
+325:2:3273
+326:2:3277
+327:2:3278
+328:2:3282
+329:2:3290
+330:2:3291
+331:2:3295
+332:2:3296
+333:2:3295
+334:2:3299
+335:2:3300
+336:2:3304
+337:0:4465
+338:2:3131
+339:0:4465
+340:2:3312
+341:2:3313
+342:2:3314
+343:0:4465
+344:2:3131
+345:0:4465
+346:2:3319
+347:0:4465
+348:1:2
+349:0:4465
+350:1:8
+351:0:4465
+352:1:9
+353:0:4465
+354:1:10
+355:0:4465
+356:1:11
+357:0:4465
+358:1:12
+359:1:13
+360:1:17
+361:1:18
+362:1:26
+363:1:27
+364:1:31
+365:1:32
+366:1:40
+367:1:45
+368:1:49
+369:1:50
+370:1:58
+371:1:59
+372:1:63
+373:1:64
+374:1:58
+375:1:59
+376:1:63
+377:1:64
+378:1:72
+379:1:84
+380:1:85
+381:1:89
+382:1:90
+383:1:91
+384:1:102
+385:1:107
+386:1:108
+387:1:119
+388:1:120
+389:1:121
+390:1:119
+391:1:120
+392:1:121
+393:1:132
+394:0:4465
+395:1:11
+396:0:4465
+397:1:141
+398:1:142
+399:0:4465
+400:1:11
+401:0:4465
+402:1:148
+403:1:149
+404:1:153
+405:1:154
+406:1:162
+407:1:163
+408:1:167
+409:1:168
+410:1:176
+411:1:181
+412:1:185
+413:1:186
+414:1:194
+415:1:195
+416:1:199
+417:1:200
+418:1:194
+419:1:195
+420:1:199
+421:1:200
+422:1:208
+423:1:220
+424:1:221
+425:1:225
+426:1:226
+427:1:227
+428:1:238
+429:1:243
+430:1:244
+431:1:255
+432:1:256
+433:1:257
+434:1:255
+435:1:256
+436:1:257
+437:1:268
+438:0:4465
+439:1:11
+440:0:4465
+441:1:277
+442:1:278
+443:1:282
+444:1:283
+445:1:291
+446:1:292
+447:1:296
+448:1:297
+449:1:305
+450:1:310
+451:1:314
+452:1:315
+453:1:323
+454:1:324
+455:1:328
+456:1:329
+457:1:323
+458:1:324
+459:1:328
+460:1:329
+461:1:337
+462:1:342
+463:1:343
+464:1:354
+465:1:355
+466:1:356
+467:1:367
+468:1:372
+469:1:373
+470:1:384
+471:1:385
+472:1:386
+473:1:384
+474:1:385
+475:1:386
+476:1:397
+477:1:404
+478:0:4465
+479:1:11
+480:0:4465
+481:1:540
+482:1:544
+483:1:545
+484:1:549
+485:1:550
+486:1:558
+487:1:566
+488:1:567
+489:1:571
+490:1:575
+491:1:576
+492:1:571
+493:1:575
+494:1:576
+495:1:580
+496:1:587
+497:1:594
+498:1:595
+499:1:602
+500:1:607
+501:1:614
+502:1:615
+503:1:614
+504:1:615
+505:1:622
+506:0:4465
+507:1:11
+508:0:4465
+509:2:4022
+510:2:4023
+511:2:4027
+512:2:4031
+513:2:4032
+514:2:4036
+515:2:4041
+516:2:4049
+517:2:4053
+518:2:4054
+519:2:4049
+520:2:4053
+521:2:4054
+522:2:4058
+523:2:4065
+524:2:4072
+525:2:4073
+526:2:4080
+527:2:4085
+528:2:4092
+529:2:4093
+530:2:4092
+531:2:4093
+532:2:4100
+533:2:4104
+534:0:4465
+535:2:3321
+536:2:4003
+537:0:4465
+538:2:3131
+539:0:4465
+540:2:3322
+541:0:4465
+542:2:3131
+543:0:4465
+544:2:3325
+545:2:3326
+546:2:3330
+547:2:3331
+548:2:3339
+549:2:3340
+550:2:3344
+551:2:3345
+552:2:3353
+553:2:3358
+554:2:3362
+555:2:3363
+556:2:3371
+557:2:3372
+558:2:3376
+559:2:3377
+560:2:3371
+561:2:3372
+562:2:3376
+563:2:3377
+564:2:3385
+565:2:3390
+566:2:3391
+567:2:3402
+568:2:3403
+569:2:3404
+570:2:3415
+571:2:3420
+572:2:3421
+573:2:3432
+574:2:3433
+575:2:3434
+576:2:3432
+577:2:3433
+578:2:3434
+579:2:3445
+580:2:3452
+581:0:4465
+582:2:3131
+583:0:4465
+584:2:3456
+585:2:3457
+586:2:3458
+587:2:3470
+588:2:3471
+589:2:3475
+590:2:3476
+591:2:3484
+592:2:3489
+593:2:3493
+594:2:3494
+595:2:3502
+596:2:3503
+597:2:3507
+598:2:3508
+599:2:3502
+600:2:3503
+601:2:3507
+602:2:3508
+603:2:3516
+604:2:3521
+605:2:3522
+606:2:3533
+607:2:3534
+608:2:3535
+609:2:3546
+610:2:3551
+611:2:3552
+612:2:3563
+613:2:3564
+614:2:3565
+615:2:3563
+616:2:3564
+617:2:3565
+618:2:3576
+619:2:3586
+620:2:3587
+621:0:4465
+622:2:3131
+623:0:4465
+624:2:3991
+625:0:4465
+626:2:4295
+627:2:4296
+628:2:4300
+629:2:4304
+630:2:4305
+631:2:4309
+632:2:4317
+633:2:4318
+634:2:4322
+635:2:4326
+636:2:4327
+637:2:4322
+638:2:4326
+639:2:4327
+640:2:4331
+641:2:4338
+642:2:4345
+643:2:4346
+644:2:4353
+645:2:4358
+646:2:4365
+647:2:4366
+648:2:4365
+649:2:4366
+650:2:4373
+651:2:4377
+652:0:4465
+653:2:3993
+654:2:4003
+655:0:4465
+656:2:3131
+657:0:4465
+658:2:3994
+659:2:3995
+660:0:4465
+661:2:3131
+662:0:4465
+663:2:3999
+664:0:4465
+665:2:4007
+666:0:4465
+667:2:3124
+668:0:4465
+669:2:3126
+670:0:4465
+671:2:3127
+672:0:4465
+673:2:3128
+674:0:4465
+675:2:3129
+676:0:4465
+677:2:3130
+678:0:4465
+679:2:3131
+680:0:4465
+681:2:3132
+682:2:3133
+683:2:3137
+684:2:3138
+685:2:3146
+686:2:3147
+687:2:3151
+688:2:3152
+689:2:3160
+690:2:3165
+691:2:3169
+692:2:3170
+693:2:3178
+694:2:3179
+695:2:3183
+696:2:3184
+697:2:3178
+698:2:3179
+699:2:3180
+700:2:3192
+701:2:3197
+702:2:3198
+703:2:3209
+704:2:3210
+705:2:3211
+706:2:3222
+707:2:3227
+708:2:3228
+709:2:3239
+710:2:3240
+711:2:3241
+712:2:3239
+713:2:3240
+714:2:3241
+715:2:3252
+716:2:3260
+717:0:4465
+718:2:3131
+719:0:4465
+720:1:632
+721:1:633
+722:1:637
+723:1:638
+724:1:646
+725:1:647
+726:1:651
+727:1:652
+728:1:660
+729:1:665
+730:1:669
+731:1:670
+732:1:678
+733:1:679
+734:1:683
+735:1:684
+736:1:678
+737:1:679
+738:1:683
+739:1:684
+740:1:692
+741:1:697
+742:1:698
+743:1:709
+744:1:710
+745:1:711
+746:1:722
+747:1:734
+748:1:735
+749:1:739
+750:1:740
+751:1:741
+752:1:739
+753:1:740
+754:1:741
+755:1:752
+756:0:4465
+757:1:11
+758:0:4465
+759:1:761
+760:1:764
+761:1:765
+762:0:4465
+763:1:11
+764:0:4465
+765:1:768
+766:1:769
+767:1:773
+768:1:774
+769:1:782
+770:1:783
+771:1:787
+772:1:788
+773:1:796
+774:1:801
+775:1:805
+776:1:806
+777:1:814
+778:1:815
+779:1:819
+780:1:820
+781:1:814
+782:1:815
+783:1:819
+784:1:820
+785:1:828
+786:1:833
+787:1:834
+788:1:845
+789:1:846
+790:1:847
+791:1:858
+792:1:870
+793:1:871
+794:1:875
+795:1:876
+796:1:877
+797:1:875
+798:1:876
+799:1:877
+800:1:888
+801:0:4465
+802:1:11
+803:0:4465
+804:1:1028
+805:1:1029
+806:1:1033
+807:1:1034
+808:1:1042
+809:1:1043
+810:1:1047
+811:1:1048
+812:1:1056
+813:1:1061
+814:1:1065
+815:1:1066
+816:1:1074
+817:1:1075
+818:1:1079
+819:1:1080
+820:1:1074
+821:1:1075
+822:1:1079
+823:1:1080
+824:1:1088
+825:1:1093
+826:1:1094
+827:1:1105
+828:1:1106
+829:1:1107
+830:1:1118
+831:1:1130
+832:1:1131
+833:1:1135
+834:1:1136
+835:1:1137
+836:1:1135
+837:1:1136
+838:1:1137
+839:1:1148
+840:1:1155
+841:1:1159
+842:0:4465
+843:1:11
+844:0:4465
+845:1:1160
+846:1:1161
+847:1:1165
+848:1:1166
+849:1:1174
+850:1:1175
+851:1:1176
+852:1:1188
+853:1:1193
+854:1:1197
+855:1:1198
+856:1:1206
+857:1:1207
+858:1:1211
+859:1:1212
+860:1:1206
+861:1:1207
+862:1:1211
+863:1:1212
+864:1:1220
+865:1:1225
+866:1:1226
+867:1:1237
+868:1:1238
+869:1:1239
+870:1:1250
+871:1:1262
+872:1:1263
+873:1:1267
+874:1:1268
+875:1:1269
+876:1:1267
+877:1:1268
+878:1:1269
+879:1:1280
+880:0:4465
+881:1:11
+882:0:4465
+883:1:1289
+884:0:4465
+885:1:3023
+886:1:3030
+887:1:3031
+888:1:3038
+889:1:3043
+890:1:3050
+891:1:3051
+892:1:3050
+893:1:3051
+894:1:3058
+895:1:3062
+896:0:4465
+897:2:3264
+898:2:3268
+899:2:3269
+900:2:3273
+901:2:3277
+902:2:3278
+903:2:3282
+904:2:3290
+905:2:3291
+906:2:3295
+907:2:3299
+908:2:3300
+909:2:3295
+910:2:3296
+911:2:3304
+912:0:4465
+913:2:3131
+914:0:4465
+915:2:3312
+916:2:3313
+917:2:3314
+918:0:4465
+919:2:3131
+920:0:4465
+921:2:3319
+922:0:4465
+923:2:4022
+924:2:4023
+925:2:4027
+926:2:4031
+927:2:4032
+928:2:4036
+929:2:4041
+930:2:4049
+931:2:4053
+932:2:4054
+933:2:4049
+934:2:4053
+935:2:4054
+936:2:4058
+937:2:4065
+938:2:4072
+939:2:4073
+940:2:4080
+941:2:4085
+942:2:4092
+943:2:4093
+944:2:4092
+945:2:4093
+946:2:4100
+947:2:4104
+948:0:4465
+949:2:3321
+950:2:4003
+951:0:4465
+952:2:3131
+953:0:4465
+954:2:3322
+955:0:4465
+956:2:3131
+957:0:4465
+958:2:3325
+959:2:3326
+960:2:3330
+961:2:3331
+962:2:3339
+963:2:3340
+964:2:3344
+965:2:3345
+966:2:3353
+967:2:3358
+968:2:3362
+969:2:3363
+970:2:3371
+971:2:3372
+972:2:3376
+973:2:3377
+974:2:3371
+975:2:3372
+976:2:3376
+977:2:3377
+978:2:3385
+979:2:3390
+980:2:3391
+981:2:3402
+982:2:3403
+983:2:3404
+984:2:3415
+985:2:3420
+986:2:3421
+987:2:3432
+988:2:3433
+989:2:3434
+990:2:3432
+991:2:3433
+992:2:3434
+993:2:3445
+994:2:3452
+995:0:4465
+996:2:3131
+997:0:4465
+998:2:3456
+999:2:3457
+1000:2:3458
+1001:2:3470
+1002:2:3471
+1003:2:3475
+1004:2:3476
+1005:2:3484
+1006:2:3489
+1007:2:3493
+1008:2:3494
+1009:2:3502
+1010:2:3503
+1011:2:3507
+1012:2:3508
+1013:2:3502
+1014:2:3503
+1015:2:3507
+1016:2:3508
+1017:2:3516
+1018:2:3521
+1019:2:3522
+1020:2:3533
+1021:2:3534
+1022:2:3535
+1023:2:3546
+1024:2:3551
+1025:2:3552
+1026:2:3563
+1027:2:3564
+1028:2:3565
+1029:2:3563
+1030:2:3564
+1031:2:3565
+1032:2:3576
+1033:2:3584
+1034:0:4465
+1035:2:3131
+1036:0:4465
+1037:2:3590
+1038:0:4465
+1039:2:4113
+1040:2:4114
+1041:2:4118
+1042:2:4122
+1043:2:4123
+1044:2:4127
+1045:2:4135
+1046:2:4136
+1047:2:4140
+1048:2:4144
+1049:2:4145
+1050:2:4140
+1051:2:4144
+1052:2:4145
+1053:2:4149
+1054:2:4156
+1055:2:4163
+1056:2:4164
+1057:2:4171
+1058:2:4176
+1059:2:4183
+1060:2:4184
+1061:2:4183
+1062:2:4184
+1063:2:4191
+1064:2:4195
+1065:0:4465
+1066:2:3592
+1067:2:3593
+1068:0:4465
+1069:2:3131
+1070:0:4465
+1071:2:3456
+1072:2:3457
+1073:2:3461
+1074:2:3462
+1075:2:3470
+1076:2:3471
+1077:2:3475
+1078:2:3476
+1079:2:3484
+1080:2:3489
+1081:2:3493
+1082:2:3494
+1083:2:3502
+1084:2:3503
+1085:2:3507
+1086:2:3508
+1087:2:3502
+1088:2:3503
+1089:2:3507
+1090:2:3508
+1091:2:3516
+1092:2:3521
+1093:2:3522
+1094:2:3533
+1095:2:3534
+1096:2:3535
+1097:2:3546
+1098:2:3551
+1099:2:3552
+1100:2:3563
+1101:2:3564
+1102:2:3565
+1103:2:3563
+1104:2:3564
+1105:2:3565
+1106:2:3576
+1107:2:3584
+1108:0:4465
+1109:2:3131
+1110:0:4465
+1111:2:3590
+1112:0:4465
+1113:2:4113
+1114:2:4114
+1115:2:4118
+1116:2:4122
+1117:2:4123
+1118:2:4127
+1119:2:4135
+1120:2:4136
+1121:2:4140
+1122:2:4144
+1123:2:4145
+1124:2:4140
+1125:2:4144
+1126:2:4145
+1127:2:4149
+1128:2:4156
+1129:2:4163
+1130:2:4164
+1131:2:4171
+1132:2:4176
+1133:2:4183
+1134:2:4184
+1135:2:4183
+1136:2:4184
+1137:2:4191
+1138:2:4195
+1139:0:4465
+1140:1:1291
+1141:1:1292
+1142:0:4463
+1143:1:11
+1144:0:4469
+1145:2:3684
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress.ltl b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.log
new file mode 100644 (file)
index 0000000..f854122
--- /dev/null
@@ -0,0 +1,512 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_reader.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+depth 23: Claim reached state 9 (line 1300)
+depth 1143: Claim reached state 9 (line 1299)
+Depth=    6515 States=    1e+06 Transitions= 5.44e+08 Memory=   507.561        t=    715 R=   1e+03
+Depth=    6515 States=    2e+06 Transitions= 1.44e+09 Memory=   545.940        t= 1.93e+03 R=   1e+03
+Depth=    6515 States=    3e+06 Transitions= 2.29e+09 Memory=   584.416        t= 3.08e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=    6515 States=    4e+06 Transitions= 3.07e+09 Memory=   646.299        t= 4.13e+03 R=   1e+03
+Depth=    6515 States=    5e+06 Transitions= 3.84e+09 Memory=   681.065        t= 5.17e+03 R=   1e+03
+Depth=    6515 States=    6e+06 Transitions= 4.91e+09 Memory=   728.916        t= 6.63e+03 R=   9e+02
+Depth=    6515 States=    7e+06 Transitions= 5.88e+09 Memory=   767.393        t= 7.96e+03 R=   9e+02
+Depth=    6515 States=    8e+06 Transitions= 6.76e+09 Memory=   808.018        t= 9.16e+03 R=   9e+02
+Depth=    6515 States=    9e+06 Transitions= 7.69e+09 Memory=   846.885        t= 1.04e+04 R=   9e+02
+pan: resizing hashtable to -w24..  done
+Depth=    6515 States=    1e+07 Transitions= 8.63e+09 Memory=  1015.705        t= 1.17e+04 R=   9e+02
+Depth=    6515 States=  1.1e+07 Transitions= 9.44e+09 Memory=  1054.572        t= 1.28e+04 R=   9e+02
+Depth=    6515 States=  1.2e+07 Transitions= 1.02e+10 Memory=  1095.490        t= 1.39e+04 R=   9e+02
+Depth=    6515 States=  1.3e+07 Transitions= 1.11e+10 Memory=  1129.182        t= 1.51e+04 R=   9e+02
+Depth=    6515 States=  1.4e+07 Transitions= 1.22e+10 Memory=  1173.518        t= 1.65e+04 R=   8e+02
+Depth=    6515 States=  1.5e+07 Transitions=  1.3e+10 Memory=  1207.893        t= 1.76e+04 R=   9e+02
+Depth=    6515 States=  1.6e+07 Transitions= 1.38e+10 Memory=  1234.065        t= 1.86e+04 R=   9e+02
+Depth=    6515 States=  1.7e+07 Transitions= 1.47e+10 Memory=  1277.424        t= 1.99e+04 R=   9e+02
+Depth=    6515 States=  1.8e+07 Transitions= 1.58e+10 Memory=  1325.568        t= 2.13e+04 R=   8e+02
+Depth=    6515 States=  1.9e+07 Transitions= 1.67e+10 Memory=  1366.486        t= 2.26e+04 R=   8e+02
+Depth=    6515 States=    2e+07 Transitions= 1.76e+10 Memory=  1405.940        t= 2.38e+04 R=   8e+02
+Depth=    6515 States=  2.1e+07 Transitions= 1.85e+10 Memory=  1450.080        t= 2.51e+04 R=   8e+02
+Depth=    6515 States=  2.2e+07 Transitions= 1.94e+10 Memory=  1491.291        t= 2.63e+04 R=   8e+02
+Depth=    6515 States=  2.3e+07 Transitions= 2.02e+10 Memory=  1530.744        t= 2.74e+04 R=   8e+02
+Depth=    6515 States=  2.4e+07 Transitions= 2.12e+10 Memory=  1565.217        t= 2.87e+04 R=   8e+02
+Depth=    6515 States=  2.5e+07 Transitions= 2.22e+10 Memory=  1608.479        t=  3e+04 R=   8e+02
+Depth=    6515 States=  2.6e+07 Transitions=  2.3e+10 Memory=  1634.651        t= 3.11e+04 R=   8e+02
+Depth=    6515 States=  2.7e+07 Transitions=  2.4e+10 Memory=  1685.725        t= 3.25e+04 R=   8e+02
+Depth=    6515 States=  2.8e+07 Transitions= 2.49e+10 Memory=  1724.885        t= 3.37e+04 R=   8e+02
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6515, errors: 0
+ 13561338 states, stored (2.88138e+07 visited)
+2.5567499e+10 states, matched
+2.5596313e+10 transitions (= visited+matched)
+1.4615258e+11 atomic steps
+hash conflicts: 8.6536644e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1500.240      equivalent memory usage for states (stored*(State-vector + overhead))
+ 1175.180      actual memory usage for states (compression: 78.33%)
+               state-vector as stored = 63 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1760.236      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 82, "(1)"
+       line 438, "pan.___", state 112, "(1)"
+       line 442, "pan.___", state 125, "(1)"
+       line 597, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 218, "(1)"
+       line 438, "pan.___", state 248, "(1)"
+       line 442, "pan.___", state 261, "(1)"
+       line 411, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 347, "(1)"
+       line 438, "pan.___", state 377, "(1)"
+       line 442, "pan.___", state 390, "(1)"
+       line 411, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 415, "(1)"
+       line 411, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 416, "else"
+       line 411, "pan.___", state 419, "(1)"
+       line 415, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 429, "(1)"
+       line 415, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 430, "else"
+       line 415, "pan.___", state 433, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 413, "pan.___", state 439, "((i<1))"
+       line 413, "pan.___", state 439, "((i>=1))"
+       line 420, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 447, "(1)"
+       line 420, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 448, "else"
+       line 420, "pan.___", state 451, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 424, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 461, "(1)"
+       line 424, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 462, "else"
+       line 424, "pan.___", state 465, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 422, "pan.___", state 471, "((i<2))"
+       line 422, "pan.___", state 471, "((i>=2))"
+       line 429, "pan.___", state 478, "(1)"
+       line 429, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 479, "else"
+       line 429, "pan.___", state 482, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 433, "pan.___", state 491, "(1)"
+       line 433, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 492, "else"
+       line 433, "pan.___", state 495, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 431, "pan.___", state 501, "((i<1))"
+       line 431, "pan.___", state 501, "((i>=1))"
+       line 438, "pan.___", state 508, "(1)"
+       line 438, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 509, "else"
+       line 438, "pan.___", state 512, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 442, "pan.___", state 521, "(1)"
+       line 442, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 522, "else"
+       line 442, "pan.___", state 525, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 440, "pan.___", state 531, "((i<2))"
+       line 440, "pan.___", state 531, "((i>=2))"
+       line 450, "pan.___", state 535, "(1)"
+       line 450, "pan.___", state 535, "(1)"
+       line 597, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 540, "(1)"
+       line 272, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 555, "(1)"
+       line 280, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 591, "(1)"
+       line 253, "pan.___", state 599, "(1)"
+       line 257, "pan.___", state 611, "(1)"
+       line 261, "pan.___", state 619, "(1)"
+       line 411, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 702, "(1)"
+       line 433, "pan.___", state 715, "(1)"
+       line 438, "pan.___", state 732, "(1)"
+       line 442, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 838, "(1)"
+       line 438, "pan.___", state 868, "(1)"
+       line 442, "pan.___", state 881, "(1)"
+       line 411, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 904, "(1)"
+       line 411, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 905, "else"
+       line 411, "pan.___", state 908, "(1)"
+       line 415, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 918, "(1)"
+       line 415, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 919, "else"
+       line 415, "pan.___", state 922, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 413, "pan.___", state 928, "((i<1))"
+       line 413, "pan.___", state 928, "((i>=1))"
+       line 420, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 936, "(1)"
+       line 420, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 937, "else"
+       line 420, "pan.___", state 940, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 424, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 950, "(1)"
+       line 424, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 951, "else"
+       line 424, "pan.___", state 954, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 422, "pan.___", state 960, "((i<2))"
+       line 422, "pan.___", state 960, "((i>=2))"
+       line 429, "pan.___", state 967, "(1)"
+       line 429, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 968, "else"
+       line 429, "pan.___", state 971, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 433, "pan.___", state 980, "(1)"
+       line 433, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 981, "else"
+       line 433, "pan.___", state 984, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 431, "pan.___", state 990, "((i<1))"
+       line 431, "pan.___", state 990, "((i>=1))"
+       line 438, "pan.___", state 997, "(1)"
+       line 438, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 998, "else"
+       line 438, "pan.___", state 1001, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 442, "pan.___", state 1010, "(1)"
+       line 442, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 1011, "else"
+       line 442, "pan.___", state 1014, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 440, "pan.___", state 1020, "((i<2))"
+       line 440, "pan.___", state 1020, "((i>=2))"
+       line 450, "pan.___", state 1024, "(1)"
+       line 450, "pan.___", state 1024, "(1)"
+       line 605, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1098, "(1)"
+       line 433, "pan.___", state 1111, "(1)"
+       line 438, "pan.___", state 1128, "(1)"
+       line 442, "pan.___", state 1141, "(1)"
+       line 411, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1230, "(1)"
+       line 438, "pan.___", state 1260, "(1)"
+       line 442, "pan.___", state 1273, "(1)"
+       line 411, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1363, "(1)"
+       line 438, "pan.___", state 1393, "(1)"
+       line 442, "pan.___", state 1406, "(1)"
+       line 411, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1492, "(1)"
+       line 438, "pan.___", state 1522, "(1)"
+       line 442, "pan.___", state 1535, "(1)"
+       line 272, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1605, "(1)"
+       line 253, "pan.___", state 1613, "(1)"
+       line 257, "pan.___", state 1625, "(1)"
+       line 261, "pan.___", state 1633, "(1)"
+       line 411, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1716, "(1)"
+       line 433, "pan.___", state 1729, "(1)"
+       line 438, "pan.___", state 1746, "(1)"
+       line 442, "pan.___", state 1759, "(1)"
+       line 411, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1845, "(1)"
+       line 433, "pan.___", state 1858, "(1)"
+       line 438, "pan.___", state 1875, "(1)"
+       line 442, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1977, "(1)"
+       line 438, "pan.___", state 2007, "(1)"
+       line 442, "pan.___", state 2020, "(1)"
+       line 644, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2113, "(1)"
+       line 438, "pan.___", state 2143, "(1)"
+       line 442, "pan.___", state 2156, "(1)"
+       line 411, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2242, "(1)"
+       line 438, "pan.___", state 2272, "(1)"
+       line 442, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2310, "(1)"
+       line 411, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2311, "else"
+       line 411, "pan.___", state 2314, "(1)"
+       line 415, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2324, "(1)"
+       line 415, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2325, "else"
+       line 415, "pan.___", state 2328, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 413, "pan.___", state 2334, "((i<1))"
+       line 413, "pan.___", state 2334, "((i>=1))"
+       line 420, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2342, "(1)"
+       line 420, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2343, "else"
+       line 420, "pan.___", state 2346, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 424, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2356, "(1)"
+       line 424, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2357, "else"
+       line 424, "pan.___", state 2360, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 422, "pan.___", state 2366, "((i<2))"
+       line 422, "pan.___", state 2366, "((i>=2))"
+       line 429, "pan.___", state 2373, "(1)"
+       line 429, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 2374, "else"
+       line 429, "pan.___", state 2377, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 433, "pan.___", state 2386, "(1)"
+       line 433, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 2387, "else"
+       line 433, "pan.___", state 2390, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 431, "pan.___", state 2396, "((i<1))"
+       line 431, "pan.___", state 2396, "((i>=1))"
+       line 438, "pan.___", state 2403, "(1)"
+       line 438, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 2404, "else"
+       line 438, "pan.___", state 2407, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 442, "pan.___", state 2416, "(1)"
+       line 442, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 2417, "else"
+       line 442, "pan.___", state 2420, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 440, "pan.___", state 2426, "((i<2))"
+       line 440, "pan.___", state 2426, "((i>=2))"
+       line 450, "pan.___", state 2430, "(1)"
+       line 450, "pan.___", state 2430, "(1)"
+       line 644, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2435, "(1)"
+       line 272, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2486, "(1)"
+       line 253, "pan.___", state 2494, "(1)"
+       line 257, "pan.___", state 2506, "(1)"
+       line 261, "pan.___", state 2514, "(1)"
+       line 411, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2597, "(1)"
+       line 433, "pan.___", state 2610, "(1)"
+       line 438, "pan.___", state 2627, "(1)"
+       line 442, "pan.___", state 2640, "(1)"
+       line 272, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2711, "(1)"
+       line 253, "pan.___", state 2719, "(1)"
+       line 257, "pan.___", state 2731, "(1)"
+       line 261, "pan.___", state 2739, "(1)"
+       line 411, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2822, "(1)"
+       line 433, "pan.___", state 2835, "(1)"
+       line 438, "pan.___", state 2852, "(1)"
+       line 442, "pan.___", state 2865, "(1)"
+       line 411, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2951, "(1)"
+       line 433, "pan.___", state 2964, "(1)"
+       line 438, "pan.___", state 2981, "(1)"
+       line 442, "pan.___", state 2994, "(1)"
+       line 249, "pan.___", state 3027, "(1)"
+       line 257, "pan.___", state 3047, "(1)"
+       line 261, "pan.___", state 3055, "(1)"
+       line 249, "pan.___", state 3070, "(1)"
+       line 253, "pan.___", state 3078, "(1)"
+       line 257, "pan.___", state 3090, "(1)"
+       line 261, "pan.___", state 3098, "(1)"
+       line 898, "pan.___", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 83, "(1)"
+       line 433, "pan.___", state 96, "(1)"
+       line 438, "pan.___", state 113, "(1)"
+       line 272, "pan.___", state 149, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 158, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 211, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 225, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 243, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 257, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 276, "(1)"
+       line 433, "pan.___", state 289, "(1)"
+       line 438, "pan.___", state 306, "(1)"
+       line 442, "pan.___", state 319, "(1)"
+       line 415, "pan.___", state 356, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 374, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 388, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 420, "(1)"
+       line 438, "pan.___", state 437, "(1)"
+       line 442, "pan.___", state 450, "(1)"
+       line 415, "pan.___", state 495, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 513, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 559, "(1)"
+       line 438, "pan.___", state 576, "(1)"
+       line 442, "pan.___", state 589, "(1)"
+       line 415, "pan.___", state 624, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 642, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 656, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 688, "(1)"
+       line 438, "pan.___", state 705, "(1)"
+       line 442, "pan.___", state 718, "(1)"
+       line 415, "pan.___", state 755, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 773, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 787, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 819, "(1)"
+       line 438, "pan.___", state 836, "(1)"
+       line 442, "pan.___", state 849, "(1)"
+       line 272, "pan.___", state 899, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 908, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 921, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 946, "(1)"
+       line 253, "pan.___", state 954, "(1)"
+       line 257, "pan.___", state 966, "(1)"
+       line 261, "pan.___", state 974, "(1)"
+       line 272, "pan.___", state 994, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1003, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1018, "(1)"
+       line 284, "pan.___", state 1025, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1041, "(1)"
+       line 253, "pan.___", state 1049, "(1)"
+       line 257, "pan.___", state 1061, "(1)"
+       line 261, "pan.___", state 1069, "(1)"
+       line 276, "pan.___", state 1094, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1107, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1132, "(1)"
+       line 253, "pan.___", state 1140, "(1)"
+       line 257, "pan.___", state 1152, "(1)"
+       line 261, "pan.___", state 1160, "(1)"
+       line 276, "pan.___", state 1185, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1198, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1207, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1223, "(1)"
+       line 253, "pan.___", state 1231, "(1)"
+       line 257, "pan.___", state 1243, "(1)"
+       line 261, "pan.___", state 1251, "(1)"
+       line 276, "pan.___", state 1276, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1289, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1298, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1314, "(1)"
+       line 253, "pan.___", state 1322, "(1)"
+       line 257, "pan.___", state 1334, "(1)"
+       line 261, "pan.___", state 1342, "(1)"
+       line 1237, "pan.___", state 1357, "-end-"
+       (78 of 1357 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1302, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 3.47e+04 seconds
+pan: rate 831.51243 states/second
+pan: avg transition delay 1.3538e-06 usec
+cp .input.spin urcu_progress_reader.spin.input
+cp .input.spin.trail urcu_progress_reader.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_reader.spin.input
new file mode 100644 (file)
index 0000000..da34f03
--- /dev/null
@@ -0,0 +1,1273 @@
+#define READER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
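+
+/*
+ * Illustrative sketch only (not part of the model): how the token macros
+ * above can express a dependency between two hypothetical statements A and B,
+ * where B may only execute once A has produced its token. TOKEN_A, TOKEN_B
+ * and the "flow" state variable are made-up names used purely for this
+ * example.
+ *
+ *	#define TOKEN_A	(1 << 0)
+ *	#define TOKEN_B	(1 << 1)
+ *
+ *	:: CONSUME_TOKENS(flow, 0, TOKEN_A) ->		// A has no prerequisite
+ *		...statement A...
+ *		PRODUCE_TOKENS(flow, TOKEN_A);
+ *	:: CONSUME_TOKENS(flow, TOKEN_A, TOKEN_B) ->	// B depends on A (RAW)
+ *		...statement B...
+ *		PRODUCE_TOKENS(flow, TOKEN_B);
+ */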
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
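+
+/*
+ * Illustrative sketch only: minimal examples of the dependency kinds described
+ * above, using hypothetical variables a, b and x.
+ *
+ *	a = x;  b = a + 1;	// RAW: b reads the value just written to a
+ *	b = a;  a = 1;		// WAR: the write to a must not pass the prior read
+ *	a = 1;  a = 2;		// WAW: the two writes to a must remain ordered
+ */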
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not write the cache line back to memory (if dirty), making the
+ * update visible to the other caches.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
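+
+/*
+ * Minimal usage sketch of the cached-variable macros above. The variable
+ * "foo" is hypothetical and not part of this model.
+ *
+ *	DECLARE_CACHED_VAR(byte, foo);		// creates mem_foo, cached_foo,
+ *						// cache_dirty_foo
+ *	INIT_CACHED_VAR(foo, 0, j);		// run once, e.g. from init
+ *	WRITE_CACHED_VAR(foo, 1);		// update this proc's cache copy and
+ *						// set its dirty bit
+ *	CACHE_WRITE_TO_MEM(foo, get_pid());	// flush to memory, as smp_wmb() does
+ *	tmp = READ_CACHED_VAR(foo);		// always reads the local cache copy
+ */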
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops waiting
+                * for the reader while sending barrier requests, and the reader
+                * keeps servicing them without continuing its own execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
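+
+/*
+ * Instantiation sketch: this macro is expanded further below, e.g. as
+ *	PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+ * The "base" shift gives each expansion its own range of one-hot token bits in
+ * proc_urcu_reader, so the outer, nested and unrolled lock bodies do not
+ * collide.
+ */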
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
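+               /*
+                * The atomic block below expresses the reader as a dataflow
+                * graph rather than a fixed instruction sequence. Roughly:
+                * CONSUME_TOKENS(proc, deps, produced) is enabled once every
+                * "deps" token has been produced and no "produced" token is
+                * set yet, PRODUCE_TOKENS() sets token bits and CLEAR_TOKENS()
+                * clears them. Any statement interleaving that respects these
+                * token dependencies can thus be explored, which is how
+                * out-of-order execution is modeled.
+                */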
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop from spilling its execution into the other loop's execution.
+        */
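+       /*
+        * This potential spill is presumably why the read-side critical
+        * section is unrolled into a second consecutive lock/unlock above
+        * (the *_UNROLL tokens): it lets the model exercise accesses from
+        * the next iteration overlapping the current one.
+        */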
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
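+
+/*
+ * Note that WRITE_PROC_ALL_TOKENS leaves out the *_WAIT_LOOP tokens
+ * (bits 8 and 12), so completing an update does not require the writer to
+ * have looped while waiting, whereas WRITE_PROC_ALL_TOKENS_CLEAR
+ * ((1 << 15) - 1) wipes every token bit, including the loop ones, before
+ * the next update.
+ */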
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
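+               /*
+                * A rough outline of the dataflow block below, per update:
+                * write WINE into the next rcu_data slot, smp_wmb(), exchange
+                * rcu_ptr to the new slot, smp_mb(), flip the grace period
+                * parity and wait for reader 0 to leave the previous parity
+                * (done twice, unless SINGLE_FLIP pre-produces the second-flip
+                * tokens), smp_mb() again, then poison the old slot. The token
+                * dependencies constrain which of these steps may be reordered
+                * with respect to each other.
+                */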
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as a prefetch. Note that all
+                        * instructions with side-effects that depend on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with progress here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave this init after the readers and writers so the pid numbering stays correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..9bc0ba8
--- /dev/null
@@ -0,0 +1,505 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+depth 23: Claim reached state 9 (line 1300)
+depth 1559: Claim reached state 9 (line 1299)
+Depth=    4420 States=    1e+06 Transitions= 6.06e+08 Memory=   491.936        t=    782 R=   1e+03
+Depth=    4420 States=    2e+06 Transitions= 1.36e+09 Memory=   516.838        t= 1.76e+03 R=   1e+03
+Depth=    4477 States=    3e+06 Transitions= 1.91e+09 Memory=   542.424        t= 2.47e+03 R=   1e+03
+pan: resizing hashtable to -w22..  done
+Depth=    4770 States=    4e+06 Transitions= 2.71e+09 Memory=   598.545        t= 3.51e+03 R=   1e+03
+Depth=    4829 States=    5e+06 Transitions= 3.25e+09 Memory=   623.838        t= 4.2e+03 R=   1e+03
+Depth=    5120 States=    6e+06 Transitions= 4.02e+09 Memory=   648.838        t= 5.19e+03 R=   1e+03
+Depth=    5188 States=    7e+06 Transitions= 4.89e+09 Memory=   693.662        t= 6.32e+03 R=   1e+03
+Depth=    5188 States=    8e+06 Transitions= 5.86e+09 Memory=   742.588        t= 7.58e+03 R=   1e+03
+Depth=    5188 States=    9e+06 Transitions= 6.88e+09 Memory=   781.651        t= 8.92e+03 R=   1e+03
+pan: resizing hashtable to -w24..  done
+Depth=    5188 States=    1e+07 Transitions= 7.86e+09 Memory=   952.229        t= 1.02e+04 R=   1e+03
+Depth=    5188 States=  1.1e+07 Transitions= 8.82e+09 Memory=   991.096        t= 1.15e+04 R=   1e+03
+Depth=    5436 States=  1.2e+07 Transitions= 9.74e+09 Memory=  1031.037        t= 1.27e+04 R=   9e+02
+Depth=    5479 States=  1.3e+07 Transitions= 1.07e+10 Memory=  1074.006        t= 1.4e+04 R=   9e+02
+Depth=    5479 States=  1.4e+07 Transitions= 1.16e+10 Memory=  1117.658        t= 1.52e+04 R=   9e+02
+Depth=    5479 States=  1.5e+07 Transitions= 1.25e+10 Memory=  1160.529        t= 1.63e+04 R=   9e+02
+Depth=    5479 States=  1.6e+07 Transitions= 1.36e+10 Memory=  1208.772        t= 1.77e+04 R=   9e+02
+Depth=    5479 States=  1.7e+07 Transitions= 1.45e+10 Memory=  1253.401        t= 1.88e+04 R=   9e+02
+Depth=    5479 States=  1.8e+07 Transitions= 1.54e+10 Memory=  1278.986        t= 2.01e+04 R=   9e+02
+Depth=    5479 States=  1.9e+07 Transitions= 1.64e+10 Memory=  1333.186        t= 2.14e+04 R=   9e+02
+Depth=    5479 States=    2e+07 Transitions= 1.73e+10 Memory=  1370.979        t= 2.26e+04 R=   9e+02
+Depth=    5479 States=  2.1e+07 Transitions= 1.83e+10 Memory=  1415.998        t= 2.39e+04 R=   9e+02
+Depth=    5479 States=  2.2e+07 Transitions= 1.92e+10 Memory=  1455.940        t= 2.51e+04 R=   9e+02
+Depth=    5479 States=  2.3e+07 Transitions= 2.02e+10 Memory=  1499.397        t= 2.63e+04 R=   9e+02
+Depth=    5479 States=  2.4e+07 Transitions=  2.1e+10 Memory=  1542.951        t= 2.75e+04 R=   9e+02
+Depth=    5588 States=  2.5e+07 Transitions= 2.21e+10 Memory=  1589.533        t= 2.89e+04 R=   9e+02
+Depth=    5588 States=  2.6e+07 Transitions= 2.31e+10 Memory=  1630.647        t= 3.02e+04 R=   9e+02
+Depth=    5588 States=  2.7e+07 Transitions=  2.4e+10 Memory=  1669.123        t= 3.14e+04 R=   9e+02
+Depth=    5588 States=  2.8e+07 Transitions=  2.5e+10 Memory=  1714.533        t= 3.27e+04 R=   9e+02
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5588, errors: 0
+ 13563866 states, stored (2.88596e+07 visited)
+2.5765769e+10 states, matched
+2.5794629e+10 transitions (= visited+matched)
+1.4717516e+11 atomic steps
+hash conflicts: 6.8547236e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1500.519      equivalent memory usage for states (stored*(State-vector + overhead))
+ 1175.436      actual memory usage for states (compression: 78.34%)
+               state-vector as stored = 63 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1760.432      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 82, "(1)"
+       line 438, "pan.___", state 112, "(1)"
+       line 442, "pan.___", state 125, "(1)"
+       line 597, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 218, "(1)"
+       line 438, "pan.___", state 248, "(1)"
+       line 442, "pan.___", state 261, "(1)"
+       line 411, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 347, "(1)"
+       line 438, "pan.___", state 377, "(1)"
+       line 442, "pan.___", state 390, "(1)"
+       line 411, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 415, "(1)"
+       line 411, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 416, "else"
+       line 411, "pan.___", state 419, "(1)"
+       line 415, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 429, "(1)"
+       line 415, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 430, "else"
+       line 415, "pan.___", state 433, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 415, "pan.___", state 434, "(1)"
+       line 413, "pan.___", state 439, "((i<1))"
+       line 413, "pan.___", state 439, "((i>=1))"
+       line 420, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 447, "(1)"
+       line 420, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 448, "else"
+       line 420, "pan.___", state 451, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 420, "pan.___", state 452, "(1)"
+       line 424, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 461, "(1)"
+       line 424, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 462, "else"
+       line 424, "pan.___", state 465, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 424, "pan.___", state 466, "(1)"
+       line 422, "pan.___", state 471, "((i<2))"
+       line 422, "pan.___", state 471, "((i>=2))"
+       line 429, "pan.___", state 478, "(1)"
+       line 429, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 479, "else"
+       line 429, "pan.___", state 482, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 429, "pan.___", state 483, "(1)"
+       line 433, "pan.___", state 491, "(1)"
+       line 433, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 492, "else"
+       line 433, "pan.___", state 495, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 433, "pan.___", state 496, "(1)"
+       line 431, "pan.___", state 501, "((i<1))"
+       line 431, "pan.___", state 501, "((i>=1))"
+       line 438, "pan.___", state 508, "(1)"
+       line 438, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 509, "else"
+       line 438, "pan.___", state 512, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 438, "pan.___", state 513, "(1)"
+       line 442, "pan.___", state 521, "(1)"
+       line 442, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 522, "else"
+       line 442, "pan.___", state 525, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 442, "pan.___", state 526, "(1)"
+       line 440, "pan.___", state 531, "((i<2))"
+       line 440, "pan.___", state 531, "((i>=2))"
+       line 450, "pan.___", state 535, "(1)"
+       line 450, "pan.___", state 535, "(1)"
+       line 597, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 540, "(1)"
+       line 272, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 555, "(1)"
+       line 280, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 591, "(1)"
+       line 253, "pan.___", state 599, "(1)"
+       line 257, "pan.___", state 611, "(1)"
+       line 261, "pan.___", state 619, "(1)"
+       line 411, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 702, "(1)"
+       line 433, "pan.___", state 715, "(1)"
+       line 438, "pan.___", state 732, "(1)"
+       line 442, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 838, "(1)"
+       line 438, "pan.___", state 868, "(1)"
+       line 442, "pan.___", state 881, "(1)"
+       line 411, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 904, "(1)"
+       line 411, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 905, "else"
+       line 411, "pan.___", state 908, "(1)"
+       line 415, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 918, "(1)"
+       line 415, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 919, "else"
+       line 415, "pan.___", state 922, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 415, "pan.___", state 923, "(1)"
+       line 413, "pan.___", state 928, "((i<1))"
+       line 413, "pan.___", state 928, "((i>=1))"
+       line 420, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 936, "(1)"
+       line 420, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 937, "else"
+       line 420, "pan.___", state 940, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 420, "pan.___", state 941, "(1)"
+       line 424, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 950, "(1)"
+       line 424, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 951, "else"
+       line 424, "pan.___", state 954, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 424, "pan.___", state 955, "(1)"
+       line 422, "pan.___", state 960, "((i<2))"
+       line 422, "pan.___", state 960, "((i>=2))"
+       line 429, "pan.___", state 967, "(1)"
+       line 429, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 968, "else"
+       line 429, "pan.___", state 971, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 429, "pan.___", state 972, "(1)"
+       line 433, "pan.___", state 980, "(1)"
+       line 433, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 981, "else"
+       line 433, "pan.___", state 984, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 433, "pan.___", state 985, "(1)"
+       line 431, "pan.___", state 990, "((i<1))"
+       line 431, "pan.___", state 990, "((i>=1))"
+       line 438, "pan.___", state 997, "(1)"
+       line 438, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 998, "else"
+       line 438, "pan.___", state 1001, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 438, "pan.___", state 1002, "(1)"
+       line 442, "pan.___", state 1010, "(1)"
+       line 442, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 1011, "else"
+       line 442, "pan.___", state 1014, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 442, "pan.___", state 1015, "(1)"
+       line 440, "pan.___", state 1020, "((i<2))"
+       line 440, "pan.___", state 1020, "((i>=2))"
+       line 450, "pan.___", state 1024, "(1)"
+       line 450, "pan.___", state 1024, "(1)"
+       line 605, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1098, "(1)"
+       line 433, "pan.___", state 1111, "(1)"
+       line 438, "pan.___", state 1128, "(1)"
+       line 442, "pan.___", state 1141, "(1)"
+       line 411, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1230, "(1)"
+       line 438, "pan.___", state 1260, "(1)"
+       line 442, "pan.___", state 1273, "(1)"
+       line 411, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1363, "(1)"
+       line 438, "pan.___", state 1393, "(1)"
+       line 442, "pan.___", state 1406, "(1)"
+       line 411, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1492, "(1)"
+       line 438, "pan.___", state 1522, "(1)"
+       line 442, "pan.___", state 1535, "(1)"
+       line 272, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1605, "(1)"
+       line 253, "pan.___", state 1613, "(1)"
+       line 257, "pan.___", state 1625, "(1)"
+       line 261, "pan.___", state 1633, "(1)"
+       line 411, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1716, "(1)"
+       line 433, "pan.___", state 1729, "(1)"
+       line 438, "pan.___", state 1746, "(1)"
+       line 442, "pan.___", state 1759, "(1)"
+       line 411, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1845, "(1)"
+       line 433, "pan.___", state 1858, "(1)"
+       line 438, "pan.___", state 1875, "(1)"
+       line 442, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 1977, "(1)"
+       line 438, "pan.___", state 2007, "(1)"
+       line 442, "pan.___", state 2020, "(1)"
+       line 644, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2113, "(1)"
+       line 438, "pan.___", state 2143, "(1)"
+       line 442, "pan.___", state 2156, "(1)"
+       line 411, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2242, "(1)"
+       line 438, "pan.___", state 2272, "(1)"
+       line 442, "pan.___", state 2285, "(1)"
+       line 411, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2310, "(1)"
+       line 411, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2311, "else"
+       line 411, "pan.___", state 2314, "(1)"
+       line 415, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2324, "(1)"
+       line 415, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2325, "else"
+       line 415, "pan.___", state 2328, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 415, "pan.___", state 2329, "(1)"
+       line 413, "pan.___", state 2334, "((i<1))"
+       line 413, "pan.___", state 2334, "((i>=1))"
+       line 420, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2342, "(1)"
+       line 420, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2343, "else"
+       line 420, "pan.___", state 2346, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 420, "pan.___", state 2347, "(1)"
+       line 424, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2356, "(1)"
+       line 424, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2357, "else"
+       line 424, "pan.___", state 2360, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 424, "pan.___", state 2361, "(1)"
+       line 422, "pan.___", state 2366, "((i<2))"
+       line 422, "pan.___", state 2366, "((i>=2))"
+       line 429, "pan.___", state 2373, "(1)"
+       line 429, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 429, "pan.___", state 2374, "else"
+       line 429, "pan.___", state 2377, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 429, "pan.___", state 2378, "(1)"
+       line 433, "pan.___", state 2386, "(1)"
+       line 433, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 433, "pan.___", state 2387, "else"
+       line 433, "pan.___", state 2390, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 433, "pan.___", state 2391, "(1)"
+       line 431, "pan.___", state 2396, "((i<1))"
+       line 431, "pan.___", state 2396, "((i>=1))"
+       line 438, "pan.___", state 2403, "(1)"
+       line 438, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 438, "pan.___", state 2404, "else"
+       line 438, "pan.___", state 2407, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 438, "pan.___", state 2408, "(1)"
+       line 442, "pan.___", state 2416, "(1)"
+       line 442, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 442, "pan.___", state 2417, "else"
+       line 442, "pan.___", state 2420, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 442, "pan.___", state 2421, "(1)"
+       line 440, "pan.___", state 2426, "((i<2))"
+       line 440, "pan.___", state 2426, "((i>=2))"
+       line 450, "pan.___", state 2430, "(1)"
+       line 450, "pan.___", state 2430, "(1)"
+       line 644, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2435, "(1)"
+       line 272, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2486, "(1)"
+       line 253, "pan.___", state 2494, "(1)"
+       line 257, "pan.___", state 2506, "(1)"
+       line 261, "pan.___", state 2514, "(1)"
+       line 411, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2597, "(1)"
+       line 433, "pan.___", state 2610, "(1)"
+       line 438, "pan.___", state 2627, "(1)"
+       line 442, "pan.___", state 2640, "(1)"
+       line 272, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2711, "(1)"
+       line 253, "pan.___", state 2719, "(1)"
+       line 257, "pan.___", state 2731, "(1)"
+       line 261, "pan.___", state 2739, "(1)"
+       line 411, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2822, "(1)"
+       line 433, "pan.___", state 2835, "(1)"
+       line 438, "pan.___", state 2852, "(1)"
+       line 442, "pan.___", state 2865, "(1)"
+       line 411, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 2951, "(1)"
+       line 433, "pan.___", state 2964, "(1)"
+       line 438, "pan.___", state 2981, "(1)"
+       line 442, "pan.___", state 2994, "(1)"
+       line 249, "pan.___", state 3027, "(1)"
+       line 257, "pan.___", state 3047, "(1)"
+       line 261, "pan.___", state 3055, "(1)"
+       line 249, "pan.___", state 3070, "(1)"
+       line 253, "pan.___", state 3078, "(1)"
+       line 257, "pan.___", state 3090, "(1)"
+       line 261, "pan.___", state 3098, "(1)"
+       line 898, "pan.___", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 83, "(1)"
+       line 433, "pan.___", state 96, "(1)"
+       line 438, "pan.___", state 113, "(1)"
+       line 272, "pan.___", state 149, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 158, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 211, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 225, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 243, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 257, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 429, "pan.___", state 276, "(1)"
+       line 433, "pan.___", state 289, "(1)"
+       line 438, "pan.___", state 306, "(1)"
+       line 442, "pan.___", state 319, "(1)"
+       line 415, "pan.___", state 356, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 374, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 388, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 420, "(1)"
+       line 438, "pan.___", state 437, "(1)"
+       line 442, "pan.___", state 450, "(1)"
+       line 415, "pan.___", state 495, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 513, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 559, "(1)"
+       line 438, "pan.___", state 576, "(1)"
+       line 442, "pan.___", state 589, "(1)"
+       line 415, "pan.___", state 624, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 642, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 656, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 688, "(1)"
+       line 438, "pan.___", state 705, "(1)"
+       line 442, "pan.___", state 718, "(1)"
+       line 415, "pan.___", state 755, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 773, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 787, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 433, "pan.___", state 819, "(1)"
+       line 438, "pan.___", state 836, "(1)"
+       line 442, "pan.___", state 849, "(1)"
+       line 272, "pan.___", state 904, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 913, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 928, "(1)"
+       line 284, "pan.___", state 935, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 951, "(1)"
+       line 253, "pan.___", state 959, "(1)"
+       line 257, "pan.___", state 971, "(1)"
+       line 261, "pan.___", state 979, "(1)"
+       line 276, "pan.___", state 1004, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1017, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1026, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1042, "(1)"
+       line 253, "pan.___", state 1050, "(1)"
+       line 257, "pan.___", state 1062, "(1)"
+       line 261, "pan.___", state 1070, "(1)"
+       line 276, "pan.___", state 1095, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1108, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1117, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1133, "(1)"
+       line 253, "pan.___", state 1141, "(1)"
+       line 257, "pan.___", state 1153, "(1)"
+       line 261, "pan.___", state 1161, "(1)"
+       line 276, "pan.___", state 1186, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1199, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1208, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1224, "(1)"
+       line 253, "pan.___", state 1232, "(1)"
+       line 257, "pan.___", state 1244, "(1)"
+       line 261, "pan.___", state 1252, "(1)"
+       line 1237, "pan.___", state 1267, "-end-"
+       (71 of 1267 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1302, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 3.38e+04 seconds
+pan: rate 854.75351 states/second
+pan: avg transition delay 1.3089e-06 usec
+cp .input.spin urcu_progress_writer.spin.input
+cp .input.spin.trail urcu_progress_writer.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer.spin.input
new file mode 100644 (file)
index 0000000..051f2f3
--- /dev/null
@@ -0,0 +1,1273 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
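+
+/*
+ * Illustrative sketch (kept under #if 0, not part of the verified model) :
+ * how the token macros chain two statements. EXAMPLE_* names and
+ * proc_example are hypothetical. Statement B may only execute once
+ * statement A has produced its token; producing a statement's own token
+ * also inhibits it from executing twice.
+ */
+#if 0
+#define EXAMPLE_TOKEN_A                (1 << 0)
+#define EXAMPLE_TOKEN_B                (1 << 1)
+
+int proc_example;
+
+inline example_token_chain()
+{
+       do
+       :: CONSUME_TOKENS(proc_example, 0, EXAMPLE_TOKEN_A) ->
+               /* statement A : no input token required */
+               PRODUCE_TOKENS(proc_example, EXAMPLE_TOKEN_A);
+       :: CONSUME_TOKENS(proc_example, EXAMPLE_TOKEN_A, EXAMPLE_TOKEN_B) ->
+               /* statement B : RAW dependency on statement A */
+               PRODUCE_TOKENS(proc_example, EXAMPLE_TOKEN_B);
+       :: CONSUME_TOKENS(proc_example, EXAMPLE_TOKEN_A | EXAMPLE_TOKEN_B, 0) ->
+               CLEAR_TOKENS(proc_example, EXAMPLE_TOKEN_A | EXAMPLE_TOKEN_B);
+               break;
+       od;
+}
+#endif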
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read from and then write to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove the need for this dependency, but it can be required when
+ * writing multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
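+
+/*
+ * Illustrative sketch (kept under #if 0, not part of the verified model) :
+ * a control dependency joined by post-dominance, expressed with the token
+ * macros. EXAMPLE_* names and proc_example_branch are hypothetical. The
+ * branch produces either its "true" or its "false" token; each arm then
+ * produces the common EXAMPLE_OUT token, and the post-dominant statement
+ * only consumes EXAMPLE_OUT, joining both paths.
+ */
+#if 0
+#define EXAMPLE_IF_TRUE                (1 << 0)
+#define EXAMPLE_IF_FALSE       (1 << 1)
+#define EXAMPLE_OUT            (1 << 2)
+#define EXAMPLE_JOIN           (1 << 3)
+
+int proc_example_branch;
+
+inline example_branch_join(cond)
+{
+       do
+       :: CONSUME_TOKENS(proc_example_branch, 0,
+                         EXAMPLE_IF_TRUE | EXAMPLE_IF_FALSE) ->
+               if
+               :: cond ->
+                       PRODUCE_TOKENS(proc_example_branch, EXAMPLE_IF_TRUE);
+               :: else ->
+                       PRODUCE_TOKENS(proc_example_branch, EXAMPLE_IF_FALSE);
+               fi;
+       :: CONSUME_TOKENS(proc_example_branch, EXAMPLE_IF_TRUE, EXAMPLE_OUT) ->
+               /* "true" arm side-effect would go here */
+               PRODUCE_TOKENS(proc_example_branch, EXAMPLE_OUT);
+       :: CONSUME_TOKENS(proc_example_branch, EXAMPLE_IF_FALSE, EXAMPLE_OUT) ->
+               /* "false" arm side-effect would go here */
+               PRODUCE_TOKENS(proc_example_branch, EXAMPLE_OUT);
+       :: CONSUME_TOKENS(proc_example_branch, EXAMPLE_OUT, EXAMPLE_JOIN) ->
+               /* post-dominant join : depends only on EXAMPLE_OUT */
+               PRODUCE_TOKENS(proc_example_branch, EXAMPLE_JOIN);
+               break;
+       od;
+}
+#endif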
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May randomly flush a dirty cache entry to memory (thus updating other caches), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
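+
+/*
+ * Illustrative sketch (kept under #if 0, not part of the verified model) :
+ * minimal use of the cached-variable macros. A write first lands in the
+ * writer's cache entry and only becomes visible to other processes once the
+ * dirty entry is flushed to memory, either explicitly (CACHE_WRITE_TO_MEM,
+ * as done by smp_wmb) or at a random point (RANDOM_CACHE_WRITE_TO_MEM, as
+ * done by ooo_mem). "example_var" is a hypothetical variable.
+ */
+#if 0
+DECLARE_CACHED_VAR(byte, example_var);
+
+inline example_cached_write(j)
+{
+       INIT_CACHED_VAR(example_var, 0, j);
+       WRITE_CACHED_VAR(example_var, 1);               /* local cache only */
+       CACHE_WRITE_TO_MEM(example_var, get_pid());     /* now visible in memory */
+}
+#endif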
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
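+
+/*
+ * Illustrative sketch (kept under #if 0, not part of the verified model) :
+ * the REMOTE_BARRIERS handshake, with both sides shown next to each other
+ * for readability even though they run in different processes. The writer
+ * requests a barrier by setting reader_barrier[i] and busy-waits until it
+ * is cleared; the reader, in smp_mb_recv(), may either service the request
+ * (executing a real smp_mb() and clearing the flag) or ignore it.
+ */
+#if 0
+inline example_remote_barrier_handshake(i, j)
+{
+       /* writer side, for reader 0 */
+       reader_barrier[0] = 1;
+       do
+       :: (reader_barrier[0] == 1) -> skip;
+       :: (reader_barrier[0] == 0) -> break;
+       od;
+
+       /* reader side, interleaved between the reader's own instructions */
+       smp_mb_recv(i, j);
+}
+#endif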
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
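+
+/*
+ * Worked example of the bit layout above (illustrative only) : with
+ * READ_LOCK_BASE == 1, the PROCEDURE_READ_LOCK body tokens are shifted by
+ * the base, so
+ *     READ_PROD_A_READ << 1           is bit 1,
+ *     READ_PROD_B_IF_TRUE << 1        is bit 2,
+ *     READ_PROD_B_IF_FALSE << 1       is bit 3,
+ *     READ_PROD_C_IF_TRUE_READ << 1   is bit 4,
+ * and the output token READ_LOCK_OUT is bit 5. The nested lock uses
+ * READ_LOCK_NESTED_BASE == 7, mapping the same body tokens to bits 7-10,
+ * with READ_LOCK_NESTED_OUT at bit 11.
+ */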
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * in one loop and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * in the next loop.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop executions.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier invalidates the second read of
+                        * the counter if it was already performed as a
+                        * prefetch. Note that all instructions with
+                        * side-effects depending on WRITE_PROC_SECOND_READ_GP
+                        * should also depend on completion of this
+                        * busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
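+                       /* Second wait: keep looping while the reader is
+                        * nested and its parity bit is still set. */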
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds an implicit serialization of
+                * the WRITE_FREE instruction. Normally, it would be permitted
+                * to spill over into the next loop iteration. Since the
+                * validation checks that the data entry read is poisoned, it
+                * is acceptable not to check for "late arriving" memory
+                * poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops forever, let the writer also busy-loop with
+        * a progress label here so that, under weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Placed after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
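+               /* Only slot 0 starts with valid data; the remaining slab
+                * entries below start out poisoned. */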
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.log b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.log
new file mode 100644 (file)
index 0000000..0912929
--- /dev/null
@@ -0,0 +1,757 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer_error.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1296)
+depth 23: Claim reached state 9 (line 1301)
+depth 1559: Claim reached state 9 (line 1300)
+pan: acceptance cycle (at depth 6380)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 7421, errors: 1
+    55319 states, stored (173044 visited)
+ 69543507 states, matched
+ 69716551 transitions (= visited+matched)
+3.7666148e+08 atomic steps
+hash conflicts:   1148679 (resolved)
+
+Stats on memory usage (in Megabytes):
+    6.120      equivalent memory usage for states (stored*(State-vector + overhead))
+    5.332      actual memory usage for states (compression: 87.12%)
+               state-vector as stored = 73 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  471.037      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 412, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 82, "(1)"
+       line 439, "pan.___", state 112, "(1)"
+       line 443, "pan.___", state 125, "(1)"
+       line 598, "pan.___", state 146, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 412, "pan.___", state 153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 185, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 199, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 218, "(1)"
+       line 439, "pan.___", state 248, "(1)"
+       line 443, "pan.___", state 261, "(1)"
+       line 412, "pan.___", state 282, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 314, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 347, "(1)"
+       line 439, "pan.___", state 377, "(1)"
+       line 443, "pan.___", state 390, "(1)"
+       line 412, "pan.___", state 413, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 415, "(1)"
+       line 412, "pan.___", state 416, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 416, "else"
+       line 412, "pan.___", state 419, "(1)"
+       line 416, "pan.___", state 427, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 429, "(1)"
+       line 416, "pan.___", state 430, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 430, "else"
+       line 416, "pan.___", state 433, "(1)"
+       line 416, "pan.___", state 434, "(1)"
+       line 416, "pan.___", state 434, "(1)"
+       line 414, "pan.___", state 439, "((i<1))"
+       line 414, "pan.___", state 439, "((i>=1))"
+       line 421, "pan.___", state 445, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 447, "(1)"
+       line 421, "pan.___", state 448, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 448, "else"
+       line 421, "pan.___", state 451, "(1)"
+       line 421, "pan.___", state 452, "(1)"
+       line 421, "pan.___", state 452, "(1)"
+       line 425, "pan.___", state 459, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 461, "(1)"
+       line 425, "pan.___", state 462, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 462, "else"
+       line 425, "pan.___", state 465, "(1)"
+       line 425, "pan.___", state 466, "(1)"
+       line 425, "pan.___", state 466, "(1)"
+       line 423, "pan.___", state 471, "((i<2))"
+       line 423, "pan.___", state 471, "((i>=2))"
+       line 430, "pan.___", state 478, "(1)"
+       line 430, "pan.___", state 479, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 479, "else"
+       line 430, "pan.___", state 482, "(1)"
+       line 430, "pan.___", state 483, "(1)"
+       line 430, "pan.___", state 483, "(1)"
+       line 434, "pan.___", state 491, "(1)"
+       line 434, "pan.___", state 492, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 492, "else"
+       line 434, "pan.___", state 495, "(1)"
+       line 434, "pan.___", state 496, "(1)"
+       line 434, "pan.___", state 496, "(1)"
+       line 432, "pan.___", state 501, "((i<1))"
+       line 432, "pan.___", state 501, "((i>=1))"
+       line 439, "pan.___", state 508, "(1)"
+       line 439, "pan.___", state 509, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 509, "else"
+       line 439, "pan.___", state 512, "(1)"
+       line 439, "pan.___", state 513, "(1)"
+       line 439, "pan.___", state 513, "(1)"
+       line 443, "pan.___", state 521, "(1)"
+       line 443, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 522, "else"
+       line 443, "pan.___", state 525, "(1)"
+       line 443, "pan.___", state 526, "(1)"
+       line 443, "pan.___", state 526, "(1)"
+       line 441, "pan.___", state 531, "((i<2))"
+       line 441, "pan.___", state 531, "((i>=2))"
+       line 451, "pan.___", state 535, "(1)"
+       line 451, "pan.___", state 535, "(1)"
+       line 598, "pan.___", state 538, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 598, "pan.___", state 539, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 598, "pan.___", state 540, "(1)"
+       line 273, "pan.___", state 544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 555, "(1)"
+       line 281, "pan.___", state 566, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 575, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 591, "(1)"
+       line 254, "pan.___", state 599, "(1)"
+       line 258, "pan.___", state 611, "(1)"
+       line 262, "pan.___", state 619, "(1)"
+       line 412, "pan.___", state 637, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 651, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 669, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 683, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 702, "(1)"
+       line 434, "pan.___", state 715, "(1)"
+       line 439, "pan.___", state 732, "(1)"
+       line 443, "pan.___", state 745, "(1)"
+       line 412, "pan.___", state 773, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 805, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 819, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 838, "(1)"
+       line 439, "pan.___", state 868, "(1)"
+       line 443, "pan.___", state 881, "(1)"
+       line 412, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 904, "(1)"
+       line 412, "pan.___", state 905, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 905, "else"
+       line 412, "pan.___", state 908, "(1)"
+       line 416, "pan.___", state 916, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 918, "(1)"
+       line 416, "pan.___", state 919, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 919, "else"
+       line 416, "pan.___", state 922, "(1)"
+       line 416, "pan.___", state 923, "(1)"
+       line 416, "pan.___", state 923, "(1)"
+       line 414, "pan.___", state 928, "((i<1))"
+       line 414, "pan.___", state 928, "((i>=1))"
+       line 421, "pan.___", state 934, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 936, "(1)"
+       line 421, "pan.___", state 937, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 937, "else"
+       line 421, "pan.___", state 940, "(1)"
+       line 421, "pan.___", state 941, "(1)"
+       line 421, "pan.___", state 941, "(1)"
+       line 425, "pan.___", state 948, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 950, "(1)"
+       line 425, "pan.___", state 951, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 951, "else"
+       line 425, "pan.___", state 954, "(1)"
+       line 425, "pan.___", state 955, "(1)"
+       line 425, "pan.___", state 955, "(1)"
+       line 423, "pan.___", state 960, "((i<2))"
+       line 423, "pan.___", state 960, "((i>=2))"
+       line 430, "pan.___", state 967, "(1)"
+       line 430, "pan.___", state 968, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 968, "else"
+       line 430, "pan.___", state 971, "(1)"
+       line 430, "pan.___", state 972, "(1)"
+       line 430, "pan.___", state 972, "(1)"
+       line 434, "pan.___", state 980, "(1)"
+       line 434, "pan.___", state 981, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 981, "else"
+       line 434, "pan.___", state 984, "(1)"
+       line 434, "pan.___", state 985, "(1)"
+       line 434, "pan.___", state 985, "(1)"
+       line 432, "pan.___", state 990, "((i<1))"
+       line 432, "pan.___", state 990, "((i>=1))"
+       line 439, "pan.___", state 997, "(1)"
+       line 439, "pan.___", state 998, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 998, "else"
+       line 439, "pan.___", state 1001, "(1)"
+       line 439, "pan.___", state 1002, "(1)"
+       line 439, "pan.___", state 1002, "(1)"
+       line 443, "pan.___", state 1010, "(1)"
+       line 443, "pan.___", state 1011, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 1011, "else"
+       line 443, "pan.___", state 1014, "(1)"
+       line 443, "pan.___", state 1015, "(1)"
+       line 443, "pan.___", state 1015, "(1)"
+       line 441, "pan.___", state 1020, "((i<2))"
+       line 441, "pan.___", state 1020, "((i>=2))"
+       line 451, "pan.___", state 1024, "(1)"
+       line 451, "pan.___", state 1024, "(1)"
+       line 606, "pan.___", state 1028, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 412, "pan.___", state 1033, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 1047, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1065, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1079, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1098, "(1)"
+       line 434, "pan.___", state 1111, "(1)"
+       line 439, "pan.___", state 1128, "(1)"
+       line 443, "pan.___", state 1141, "(1)"
+       line 412, "pan.___", state 1165, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1197, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1211, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1230, "(1)"
+       line 439, "pan.___", state 1260, "(1)"
+       line 443, "pan.___", state 1273, "(1)"
+       line 412, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1363, "(1)"
+       line 439, "pan.___", state 1393, "(1)"
+       line 443, "pan.___", state 1406, "(1)"
+       line 412, "pan.___", state 1427, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1492, "(1)"
+       line 439, "pan.___", state 1522, "(1)"
+       line 443, "pan.___", state 1535, "(1)"
+       line 273, "pan.___", state 1558, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 1580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 1589, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1605, "(1)"
+       line 254, "pan.___", state 1613, "(1)"
+       line 258, "pan.___", state 1625, "(1)"
+       line 262, "pan.___", state 1633, "(1)"
+       line 412, "pan.___", state 1651, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 1665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1716, "(1)"
+       line 434, "pan.___", state 1729, "(1)"
+       line 439, "pan.___", state 1746, "(1)"
+       line 443, "pan.___", state 1759, "(1)"
+       line 412, "pan.___", state 1780, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 1794, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1845, "(1)"
+       line 434, "pan.___", state 1858, "(1)"
+       line 439, "pan.___", state 1875, "(1)"
+       line 443, "pan.___", state 1888, "(1)"
+       line 412, "pan.___", state 1912, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1944, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1958, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1977, "(1)"
+       line 439, "pan.___", state 2007, "(1)"
+       line 443, "pan.___", state 2020, "(1)"
+       line 645, "pan.___", state 2041, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 412, "pan.___", state 2048, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2080, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2094, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 2113, "(1)"
+       line 439, "pan.___", state 2143, "(1)"
+       line 443, "pan.___", state 2156, "(1)"
+       line 412, "pan.___", state 2177, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2209, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 2242, "(1)"
+       line 439, "pan.___", state 2272, "(1)"
+       line 443, "pan.___", state 2285, "(1)"
+       line 412, "pan.___", state 2308, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 2310, "(1)"
+       line 412, "pan.___", state 2311, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 2311, "else"
+       line 412, "pan.___", state 2314, "(1)"
+       line 416, "pan.___", state 2322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2324, "(1)"
+       line 416, "pan.___", state 2325, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 2325, "else"
+       line 416, "pan.___", state 2328, "(1)"
+       line 416, "pan.___", state 2329, "(1)"
+       line 416, "pan.___", state 2329, "(1)"
+       line 414, "pan.___", state 2334, "((i<1))"
+       line 414, "pan.___", state 2334, "((i>=1))"
+       line 421, "pan.___", state 2340, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2342, "(1)"
+       line 421, "pan.___", state 2343, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 2343, "else"
+       line 421, "pan.___", state 2346, "(1)"
+       line 421, "pan.___", state 2347, "(1)"
+       line 421, "pan.___", state 2347, "(1)"
+       line 425, "pan.___", state 2354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2356, "(1)"
+       line 425, "pan.___", state 2357, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 2357, "else"
+       line 425, "pan.___", state 2360, "(1)"
+       line 425, "pan.___", state 2361, "(1)"
+       line 425, "pan.___", state 2361, "(1)"
+       line 423, "pan.___", state 2366, "((i<2))"
+       line 423, "pan.___", state 2366, "((i>=2))"
+       line 430, "pan.___", state 2373, "(1)"
+       line 430, "pan.___", state 2374, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 2374, "else"
+       line 430, "pan.___", state 2377, "(1)"
+       line 430, "pan.___", state 2378, "(1)"
+       line 430, "pan.___", state 2378, "(1)"
+       line 434, "pan.___", state 2386, "(1)"
+       line 434, "pan.___", state 2387, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 2387, "else"
+       line 434, "pan.___", state 2390, "(1)"
+       line 434, "pan.___", state 2391, "(1)"
+       line 434, "pan.___", state 2391, "(1)"
+       line 432, "pan.___", state 2396, "((i<1))"
+       line 432, "pan.___", state 2396, "((i>=1))"
+       line 439, "pan.___", state 2403, "(1)"
+       line 439, "pan.___", state 2404, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 2404, "else"
+       line 439, "pan.___", state 2407, "(1)"
+       line 439, "pan.___", state 2408, "(1)"
+       line 439, "pan.___", state 2408, "(1)"
+       line 443, "pan.___", state 2416, "(1)"
+       line 443, "pan.___", state 2417, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 2417, "else"
+       line 443, "pan.___", state 2420, "(1)"
+       line 443, "pan.___", state 2421, "(1)"
+       line 443, "pan.___", state 2421, "(1)"
+       line 441, "pan.___", state 2426, "((i<2))"
+       line 441, "pan.___", state 2426, "((i>=2))"
+       line 451, "pan.___", state 2430, "(1)"
+       line 451, "pan.___", state 2430, "(1)"
+       line 645, "pan.___", state 2433, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 645, "pan.___", state 2434, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 645, "pan.___", state 2435, "(1)"
+       line 273, "pan.___", state 2439, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 2461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 2470, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2486, "(1)"
+       line 254, "pan.___", state 2494, "(1)"
+       line 258, "pan.___", state 2506, "(1)"
+       line 262, "pan.___", state 2514, "(1)"
+       line 412, "pan.___", state 2532, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2546, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2564, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2578, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 2597, "(1)"
+       line 434, "pan.___", state 2610, "(1)"
+       line 439, "pan.___", state 2627, "(1)"
+       line 443, "pan.___", state 2640, "(1)"
+       line 273, "pan.___", state 2664, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 2673, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 2686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 2695, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2711, "(1)"
+       line 254, "pan.___", state 2719, "(1)"
+       line 258, "pan.___", state 2731, "(1)"
+       line 262, "pan.___", state 2739, "(1)"
+       line 412, "pan.___", state 2757, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 2822, "(1)"
+       line 434, "pan.___", state 2835, "(1)"
+       line 439, "pan.___", state 2852, "(1)"
+       line 443, "pan.___", state 2865, "(1)"
+       line 412, "pan.___", state 2886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2900, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 2951, "(1)"
+       line 434, "pan.___", state 2964, "(1)"
+       line 439, "pan.___", state 2981, "(1)"
+       line 443, "pan.___", state 2994, "(1)"
+       line 250, "pan.___", state 3027, "(1)"
+       line 258, "pan.___", state 3047, "(1)"
+       line 262, "pan.___", state 3055, "(1)"
+       line 250, "pan.___", state 3070, "(1)"
+       line 254, "pan.___", state 3078, "(1)"
+       line 258, "pan.___", state 3090, "(1)"
+       line 262, "pan.___", state 3098, "(1)"
+       line 899, "pan.___", state 3115, "-end-"
+       (283 of 3115 states)
+unreached in proctype urcu_writer
+       line 412, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 24, "(1)"
+       line 416, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 38, "(1)"
+       line 416, "pan.___", state 39, "(1)"
+       line 416, "pan.___", state 39, "(1)"
+       line 414, "pan.___", state 44, "((i<1))"
+       line 414, "pan.___", state 44, "((i>=1))"
+       line 421, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 56, "(1)"
+       line 421, "pan.___", state 57, "(1)"
+       line 421, "pan.___", state 57, "(1)"
+       line 425, "pan.___", state 64, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 70, "(1)"
+       line 425, "pan.___", state 71, "(1)"
+       line 425, "pan.___", state 71, "(1)"
+       line 423, "pan.___", state 76, "((i<2))"
+       line 423, "pan.___", state 76, "((i>=2))"
+       line 430, "pan.___", state 83, "(1)"
+       line 430, "pan.___", state 84, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 84, "else"
+       line 430, "pan.___", state 87, "(1)"
+       line 430, "pan.___", state 88, "(1)"
+       line 430, "pan.___", state 88, "(1)"
+       line 434, "pan.___", state 96, "(1)"
+       line 434, "pan.___", state 97, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 97, "else"
+       line 434, "pan.___", state 100, "(1)"
+       line 434, "pan.___", state 101, "(1)"
+       line 434, "pan.___", state 101, "(1)"
+       line 432, "pan.___", state 106, "((i<1))"
+       line 432, "pan.___", state 106, "((i>=1))"
+       line 439, "pan.___", state 113, "(1)"
+       line 439, "pan.___", state 114, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 114, "else"
+       line 439, "pan.___", state 117, "(1)"
+       line 439, "pan.___", state 118, "(1)"
+       line 439, "pan.___", state 118, "(1)"
+       line 443, "pan.___", state 126, "(1)"
+       line 443, "pan.___", state 127, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 127, "else"
+       line 443, "pan.___", state 130, "(1)"
+       line 443, "pan.___", state 131, "(1)"
+       line 443, "pan.___", state 131, "(1)"
+       line 441, "pan.___", state 136, "((i<2))"
+       line 441, "pan.___", state 136, "((i>=2))"
+       line 451, "pan.___", state 140, "(1)"
+       line 451, "pan.___", state 140, "(1)"
+       line 273, "pan.___", state 149, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 158, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 166, "((i<1))"
+       line 275, "pan.___", state 166, "((i>=1))"
+       line 281, "pan.___", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 1022, "pan.___", state 199, "old_data = cached_rcu_ptr.val[_pid]"
+       line 412, "pan.___", state 211, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 217, "(1)"
+       line 416, "pan.___", state 225, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 231, "(1)"
+       line 416, "pan.___", state 232, "(1)"
+       line 416, "pan.___", state 232, "(1)"
+       line 414, "pan.___", state 237, "((i<1))"
+       line 414, "pan.___", state 237, "((i>=1))"
+       line 421, "pan.___", state 243, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 249, "(1)"
+       line 421, "pan.___", state 250, "(1)"
+       line 421, "pan.___", state 250, "(1)"
+       line 425, "pan.___", state 257, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 263, "(1)"
+       line 425, "pan.___", state 264, "(1)"
+       line 425, "pan.___", state 264, "(1)"
+       line 423, "pan.___", state 269, "((i<2))"
+       line 423, "pan.___", state 269, "((i>=2))"
+       line 430, "pan.___", state 276, "(1)"
+       line 430, "pan.___", state 277, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 277, "else"
+       line 430, "pan.___", state 280, "(1)"
+       line 430, "pan.___", state 281, "(1)"
+       line 430, "pan.___", state 281, "(1)"
+       line 434, "pan.___", state 289, "(1)"
+       line 434, "pan.___", state 290, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 290, "else"
+       line 434, "pan.___", state 293, "(1)"
+       line 434, "pan.___", state 294, "(1)"
+       line 434, "pan.___", state 294, "(1)"
+       line 432, "pan.___", state 299, "((i<1))"
+       line 432, "pan.___", state 299, "((i>=1))"
+       line 439, "pan.___", state 306, "(1)"
+       line 439, "pan.___", state 307, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 307, "else"
+       line 439, "pan.___", state 310, "(1)"
+       line 439, "pan.___", state 311, "(1)"
+       line 439, "pan.___", state 311, "(1)"
+       line 443, "pan.___", state 319, "(1)"
+       line 443, "pan.___", state 320, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 320, "else"
+       line 443, "pan.___", state 323, "(1)"
+       line 443, "pan.___", state 324, "(1)"
+       line 443, "pan.___", state 324, "(1)"
+       line 441, "pan.___", state 329, "((i<2))"
+       line 441, "pan.___", state 329, "((i>=2))"
+       line 451, "pan.___", state 333, "(1)"
+       line 451, "pan.___", state 333, "(1)"
+       line 412, "pan.___", state 344, "(1)"
+       line 412, "pan.___", state 345, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 345, "else"
+       line 412, "pan.___", state 348, "(1)"
+       line 416, "pan.___", state 356, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 362, "(1)"
+       line 416, "pan.___", state 363, "(1)"
+       line 416, "pan.___", state 363, "(1)"
+       line 414, "pan.___", state 368, "((i<1))"
+       line 414, "pan.___", state 368, "((i>=1))"
+       line 421, "pan.___", state 374, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 380, "(1)"
+       line 421, "pan.___", state 381, "(1)"
+       line 421, "pan.___", state 381, "(1)"
+       line 425, "pan.___", state 388, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 394, "(1)"
+       line 425, "pan.___", state 395, "(1)"
+       line 425, "pan.___", state 395, "(1)"
+       line 423, "pan.___", state 400, "((i<2))"
+       line 423, "pan.___", state 400, "((i>=2))"
+       line 430, "pan.___", state 407, "(1)"
+       line 430, "pan.___", state 408, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 408, "else"
+       line 430, "pan.___", state 411, "(1)"
+       line 430, "pan.___", state 412, "(1)"
+       line 430, "pan.___", state 412, "(1)"
+       line 434, "pan.___", state 420, "(1)"
+       line 434, "pan.___", state 421, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 421, "else"
+       line 434, "pan.___", state 424, "(1)"
+       line 434, "pan.___", state 425, "(1)"
+       line 434, "pan.___", state 425, "(1)"
+       line 432, "pan.___", state 430, "((i<1))"
+       line 432, "pan.___", state 430, "((i>=1))"
+       line 439, "pan.___", state 437, "(1)"
+       line 439, "pan.___", state 438, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 438, "else"
+       line 439, "pan.___", state 441, "(1)"
+       line 439, "pan.___", state 442, "(1)"
+       line 439, "pan.___", state 442, "(1)"
+       line 443, "pan.___", state 450, "(1)"
+       line 443, "pan.___", state 451, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 451, "else"
+       line 443, "pan.___", state 454, "(1)"
+       line 443, "pan.___", state 455, "(1)"
+       line 443, "pan.___", state 455, "(1)"
+       line 441, "pan.___", state 460, "((i<2))"
+       line 441, "pan.___", state 460, "((i>=2))"
+       line 451, "pan.___", state 464, "(1)"
+       line 451, "pan.___", state 464, "(1)"
+       line 412, "pan.___", state 477, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 479, "(1)"
+       line 412, "pan.___", state 480, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 480, "else"
+       line 412, "pan.___", state 483, "(1)"
+       line 416, "pan.___", state 491, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 493, "(1)"
+       line 416, "pan.___", state 494, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 494, "else"
+       line 416, "pan.___", state 497, "(1)"
+       line 416, "pan.___", state 498, "(1)"
+       line 416, "pan.___", state 498, "(1)"
+       line 414, "pan.___", state 503, "((i<1))"
+       line 414, "pan.___", state 503, "((i>=1))"
+       line 421, "pan.___", state 509, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 511, "(1)"
+       line 421, "pan.___", state 512, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 512, "else"
+       line 421, "pan.___", state 515, "(1)"
+       line 421, "pan.___", state 516, "(1)"
+       line 421, "pan.___", state 516, "(1)"
+       line 425, "pan.___", state 523, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 525, "(1)"
+       line 425, "pan.___", state 526, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 526, "else"
+       line 425, "pan.___", state 529, "(1)"
+       line 425, "pan.___", state 530, "(1)"
+       line 425, "pan.___", state 530, "(1)"
+       line 423, "pan.___", state 535, "((i<2))"
+       line 423, "pan.___", state 535, "((i>=2))"
+       line 430, "pan.___", state 542, "(1)"
+       line 430, "pan.___", state 543, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 543, "else"
+       line 430, "pan.___", state 546, "(1)"
+       line 430, "pan.___", state 547, "(1)"
+       line 430, "pan.___", state 547, "(1)"
+       line 434, "pan.___", state 555, "(1)"
+       line 434, "pan.___", state 556, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 556, "else"
+       line 434, "pan.___", state 559, "(1)"
+       line 434, "pan.___", state 560, "(1)"
+       line 434, "pan.___", state 560, "(1)"
+       line 432, "pan.___", state 565, "((i<1))"
+       line 432, "pan.___", state 565, "((i>=1))"
+       line 439, "pan.___", state 572, "(1)"
+       line 439, "pan.___", state 573, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 573, "else"
+       line 439, "pan.___", state 576, "(1)"
+       line 439, "pan.___", state 577, "(1)"
+       line 439, "pan.___", state 577, "(1)"
+       line 443, "pan.___", state 585, "(1)"
+       line 443, "pan.___", state 586, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 586, "else"
+       line 443, "pan.___", state 589, "(1)"
+       line 443, "pan.___", state 590, "(1)"
+       line 443, "pan.___", state 590, "(1)"
+       line 451, "pan.___", state 599, "(1)"
+       line 451, "pan.___", state 599, "(1)"
+       line 412, "pan.___", state 605, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 611, "(1)"
+       line 416, "pan.___", state 619, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 625, "(1)"
+       line 416, "pan.___", state 626, "(1)"
+       line 416, "pan.___", state 626, "(1)"
+       line 414, "pan.___", state 631, "((i<1))"
+       line 414, "pan.___", state 631, "((i>=1))"
+       line 421, "pan.___", state 637, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 643, "(1)"
+       line 421, "pan.___", state 644, "(1)"
+       line 421, "pan.___", state 644, "(1)"
+       line 425, "pan.___", state 651, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 657, "(1)"
+       line 425, "pan.___", state 658, "(1)"
+       line 425, "pan.___", state 658, "(1)"
+       line 423, "pan.___", state 663, "((i<2))"
+       line 423, "pan.___", state 663, "((i>=2))"
+       line 430, "pan.___", state 670, "(1)"
+       line 430, "pan.___", state 671, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 671, "else"
+       line 430, "pan.___", state 674, "(1)"
+       line 430, "pan.___", state 675, "(1)"
+       line 430, "pan.___", state 675, "(1)"
+       line 434, "pan.___", state 683, "(1)"
+       line 434, "pan.___", state 684, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 684, "else"
+       line 434, "pan.___", state 687, "(1)"
+       line 434, "pan.___", state 688, "(1)"
+       line 434, "pan.___", state 688, "(1)"
+       line 432, "pan.___", state 693, "((i<1))"
+       line 432, "pan.___", state 693, "((i>=1))"
+       line 439, "pan.___", state 700, "(1)"
+       line 439, "pan.___", state 701, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 701, "else"
+       line 439, "pan.___", state 704, "(1)"
+       line 439, "pan.___", state 705, "(1)"
+       line 439, "pan.___", state 705, "(1)"
+       line 443, "pan.___", state 713, "(1)"
+       line 443, "pan.___", state 714, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 714, "else"
+       line 443, "pan.___", state 717, "(1)"
+       line 443, "pan.___", state 718, "(1)"
+       line 443, "pan.___", state 718, "(1)"
+       line 451, "pan.___", state 727, "(1)"
+       line 451, "pan.___", state 727, "(1)"
+       line 412, "pan.___", state 734, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 740, "(1)"
+       line 416, "pan.___", state 748, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 754, "(1)"
+       line 416, "pan.___", state 755, "(1)"
+       line 416, "pan.___", state 755, "(1)"
+       line 414, "pan.___", state 760, "((i<1))"
+       line 414, "pan.___", state 760, "((i>=1))"
+       line 421, "pan.___", state 766, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 772, "(1)"
+       line 421, "pan.___", state 773, "(1)"
+       line 421, "pan.___", state 773, "(1)"
+       line 425, "pan.___", state 780, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 786, "(1)"
+       line 425, "pan.___", state 787, "(1)"
+       line 425, "pan.___", state 787, "(1)"
+       line 423, "pan.___", state 792, "((i<2))"
+       line 423, "pan.___", state 792, "((i>=2))"
+       line 430, "pan.___", state 799, "(1)"
+       line 430, "pan.___", state 800, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 430, "pan.___", state 800, "else"
+       line 430, "pan.___", state 803, "(1)"
+       line 430, "pan.___", state 804, "(1)"
+       line 430, "pan.___", state 804, "(1)"
+       line 434, "pan.___", state 812, "(1)"
+       line 434, "pan.___", state 813, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 434, "pan.___", state 813, "else"
+       line 434, "pan.___", state 816, "(1)"
+       line 434, "pan.___", state 817, "(1)"
+       line 434, "pan.___", state 817, "(1)"
+       line 432, "pan.___", state 822, "((i<1))"
+       line 432, "pan.___", state 822, "((i>=1))"
+       line 439, "pan.___", state 829, "(1)"
+       line 439, "pan.___", state 830, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 439, "pan.___", state 830, "else"
+       line 439, "pan.___", state 833, "(1)"
+       line 439, "pan.___", state 834, "(1)"
+       line 439, "pan.___", state 834, "(1)"
+       line 443, "pan.___", state 842, "(1)"
+       line 443, "pan.___", state 843, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 443, "pan.___", state 843, "else"
+       line 443, "pan.___", state 846, "(1)"
+       line 443, "pan.___", state 847, "(1)"
+       line 443, "pan.___", state 847, "(1)"
+       line 441, "pan.___", state 852, "((i<2))"
+       line 441, "pan.___", state 852, "((i>=2))"
+       line 451, "pan.___", state 856, "(1)"
+       line 451, "pan.___", state 856, "(1)"
+       line 416, "pan.___", state 879, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 897, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 911, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 434, "pan.___", state 943, "(1)"
+       line 439, "pan.___", state 960, "(1)"
+       line 443, "pan.___", state 973, "(1)"
+       line 412, "pan.___", state 999, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 1013, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1031, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1045, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 430, "pan.___", state 1064, "(1)"
+       line 434, "pan.___", state 1077, "(1)"
+       line 439, "pan.___", state 1094, "(1)"
+       line 443, "pan.___", state 1107, "(1)"
+       line 273, "pan.___", state 1153, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 1162, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1170, "((i<1))"
+       line 275, "pan.___", state 1170, "((i>=1))"
+       line 281, "pan.___", state 1177, "(1)"
+       line 281, "pan.___", state 1178, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 281, "pan.___", state 1178, "else"
+       line 285, "pan.___", state 1184, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1192, "((i<2))"
+       line 283, "pan.___", state 1192, "((i>=2))"
+       line 250, "pan.___", state 1200, "(1)"
+       line 254, "pan.___", state 1208, "(1)"
+       line 254, "pan.___", state 1209, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 1209, "else"
+       line 252, "pan.___", state 1214, "((i<1))"
+       line 252, "pan.___", state 1214, "((i>=1))"
+       line 258, "pan.___", state 1220, "(1)"
+       line 258, "pan.___", state 1221, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 1221, "else"
+       line 262, "pan.___", state 1228, "(1)"
+       line 262, "pan.___", state 1229, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 1229, "else"
+       line 267, "pan.___", state 1238, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 1238, "else"
+       line 277, "pan.___", state 1253, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 1266, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 1275, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1291, "(1)"
+       line 254, "pan.___", state 1299, "(1)"
+       line 258, "pan.___", state 1311, "(1)"
+       line 262, "pan.___", state 1319, "(1)"
+       line 1238, "pan.___", state 1334, "-end-"
+       (242 of 1334 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1303, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 88.5 seconds
+pan: rate 1954.1954 states/second
+pan: avg transition delay 1.2701e-06 usec
+cp .input.spin urcu_progress_writer_error.spin.input
+cp .input.spin.trail urcu_progress_writer_error.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-alpha-no-ipi'
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.spin.input b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.spin.input
new file mode 100644 (file)
index 0000000..ad4f91f
--- /dev/null
@@ -0,0 +1,1274 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
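+
+/*
+ * Informative note : with RCU_GP_CTR_BIT = (1 << 7) = 0x80, RCU_GP_CTR_NEST_MASK
+ * works out to 0x7f, so the low 7 bits of a reader's counter hold the nesting
+ * count while bit 7 holds the grace-period parity.
+ */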
+
+//disabled
+//#define REMOTE_BARRIERS
+
+#define ARCH_ALPHA
+//#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits act as triggers to execute the instructions having
+ * those variables as input, and are left active to inhibit re-execution of an
+ * instruction. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
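+
+/*
+ * Informative usage sketch (hypothetical tokens TOK_A and TOK_B, not part of
+ * this model) : an option guarded by CONSUME_TOKENS only becomes executable
+ * once TOK_A has been produced and while TOK_B has not, and it marks its own
+ * completion by producing TOK_B :
+ *
+ *     :: CONSUME_TOKENS(proc_state, TOK_A, TOK_B) ->
+ *             ... instruction body ...
+ *             PRODUCE_TOKENS(proc_state, TOK_B);
+ *
+ * This is the pattern used throughout the reader and writer bodies below.
+ */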
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it must be kept when writing
+ * multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
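+
+/*
+ * Informative illustration (not part of the model) : for two successive
+ * statements
+ *     S1: x = y + 1
+ *     S2: z = x * 2
+ * S2 reads x written by S1 (RAW). If S1 instead read x and S2 wrote it, the
+ * pair would form a WAR dependency; two successive writes to x would form a
+ * WAW dependency.
+ */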
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May randomly flush dirty cache content to memory (making it visible to the
+ * other processes' caches), or do nothing.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
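+
+/*
+ * Informative summary of the cache model : DECLARE_CACHED_VAR(byte, foo)
+ * expands to a main-memory copy (mem_foo), one cached copy per process
+ * (cached_foo.val[pid]) and a per-process dirty bitfield (cache_dirty_foo).
+ * WRITE_CACHED_VAR updates the local cached copy and marks it dirty,
+ * CACHE_WRITE_TO_MEM flushes a dirty copy to mem_foo, and CACHE_READ_FROM_MEM
+ * refreshes a clean copy from mem_foo. The RANDOM_* variants model the cache
+ * performing (or skipping) these operations at arbitrary points in time.
+ */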
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
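+
+/*
+ * Informative note : ooo_mem() models out-of-order memory effects by randomly
+ * flushing each cached variable to memory and, when HAVE_OOO_CACHE_READ is
+ * defined (Alpha), by also randomly refreshing cached variables from memory;
+ * otherwise a smp_rmb() is performed instead.
+ */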
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
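+
+/*
+ * Informative sketch (pseudocode, for illustration only) : the macro above
+ * roughly models the following read-lock sequence, each step being a separate
+ * token-gated instruction so the model can explore reordering and prefetch :
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;     (outermost lock)
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;         (nested lock)
+ */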
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
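+
+/*
+ * Informative sketch (pseudocode, for illustration only) : the unlock macro
+ * above roughly models
+ *
+ *     tmp = urcu_active_readers[id];
+ *     urcu_active_readers[id] = tmp - 1;
+ *
+ * again split into separate token-gated instructions.
+ */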
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
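+
+/* Informative note : (1 << 30) - 1 sets bits 0 to 29, i.e. every reader token
+ * defined above, branch bits included, which is why it is used to clear the
+ * whole reader token state when a read-side pass completes. */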
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the barrier
+                        * because avoiding it would require a branch whose performance impact in the
+                        * common case is not justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event the mb()s are removed (execution using the
+        * signal handler to promote barrier() -> smp_mb()), nothing prevents
+        * one loop iteration from spilling its execution into the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
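+
+/*
+ * Informative summary : taken together, the writer tokens above follow the
+ * modeled update sequence : write the new data, wmb(), exchange the RCU
+ * pointer, mb(), flip the grace-period parity and wait for the reader (done
+ * twice), mb(), then poison (free) the old data.
+ */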
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill over into the next loop iteration. Given that the
+                * validation checks that the data entry read is poisoned, it
+                * is OK not to check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, under weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
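+
+/*
+ * Summary of the mechanism used above (descriptive only): the writer
+ * proctype models weakly-ordered, out-of-order execution of the
+ * update-side code. Each instruction is guarded by
+ * CONSUME_TOKENS(proc, deps, not_done), which only fires once every
+ * dependency token in "deps" has been produced and no token in
+ * "not_done" has been produced yet. PRODUCE_TOKENS marks an
+ * instruction as executed; CLEAR_TOKENS resets the loop-local tokens
+ * so the grace-period wait loops can iterate again.
+ */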
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
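
The file added below is the Spin trail recorded for the writer-progress error
run of the no-IPI model. After the leading bookkeeping entries, each line
appears to follow the usual Spin trail layout of step:process:transition
triples; the authoritative way to replay it is spin -t against the matching
.spin.input file. As a rough sketch only (the column interpretation is an
assumption, and the helper names are hypothetical), such a trail could be
summarized per process like this:

#!/usr/bin/env python3
# Sketch: summarize which processes appear in a Spin trail such as
# urcu_progress_writer_error.spin.input.trail.  Assumes the usual
# "step:process:transition" triple per line; negative entries are
# treated as bookkeeping markers and skipped.  For an authoritative
# replay, use Spin itself (spin -t) with the matching model.

from collections import Counter
import sys

def read_trail(path):
    """Return the (step, process, transition) triples found in the trail."""
    steps = []
    with open(path) as f:
        for line in f:
            parts = line.strip().split(":")
            if len(parts) != 3:
                continue
            step, proc, trans = (int(p) for p in parts)
            if step < 0 or proc < 0 or trans < 0:
                continue        # bookkeeping marker, not an execution step
            steps.append((step, proc, trans))
    return steps

def main(path):
    steps = read_trail(path)
    per_proc = Counter(proc for _, proc, _ in steps)
    print(f"{len(steps)} execution steps")
    for proc, count in sorted(per_proc.items()):
        print(f"  process {proc}: {count} steps")

if __name__ == "__main__":
    main(sys.argv[1])
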
diff --git a/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.spin.input.trail b/formal-model/urcu-controldataflow-alpha-no-ipi/urcu_progress_writer_error.spin.input.trail
new file mode 100644 (file)
index 0000000..8ab0111
--- /dev/null
@@ -0,0 +1,7390 @@
+-2:3:-2
+-4:-4:-4
+1:0:4529
+2:3:4449
+3:3:4452
+4:3:4452
+5:3:4455
+6:3:4463
+7:3:4463
+8:3:4466
+9:3:4472
+10:3:4476
+11:3:4476
+12:3:4479
+13:3:4489
+14:3:4497
+15:3:4497
+16:3:4500
+17:3:4506
+18:3:4510
+19:3:4510
+20:3:4513
+21:3:4519
+22:3:4523
+23:3:4524
+24:0:4529
+25:3:4526
+26:0:4529
+27:2:3117
+28:0:4529
+29:2:3123
+30:0:4529
+31:2:3124
+32:0:4529
+33:2:3125
+34:0:4527
+35:2:3126
+36:0:4533
+37:2:3127
+38:0:4533
+39:2:3128
+40:2:3129
+41:2:3133
+42:2:3134
+43:2:3142
+44:2:3143
+45:2:3147
+46:2:3148
+47:2:3156
+48:2:3161
+49:2:3165
+50:2:3166
+51:2:3174
+52:2:3175
+53:2:3179
+54:2:3180
+55:2:3174
+56:2:3175
+57:2:3179
+58:2:3180
+59:2:3188
+60:2:3193
+61:2:3194
+62:2:3205
+63:2:3206
+64:2:3207
+65:2:3218
+66:2:3223
+67:2:3224
+68:2:3235
+69:2:3236
+70:2:3237
+71:2:3235
+72:2:3236
+73:2:3237
+74:2:3248
+75:2:3256
+76:0:4533
+77:2:3127
+78:0:4533
+79:2:3260
+80:2:3264
+81:2:3265
+82:2:3269
+83:2:3273
+84:2:3274
+85:2:3278
+86:2:3286
+87:2:3287
+88:2:3291
+89:2:3295
+90:2:3296
+91:2:3291
+92:2:3292
+93:2:3300
+94:0:4533
+95:2:3127
+96:0:4533
+97:2:3308
+98:2:3309
+99:2:3310
+100:0:4533
+101:2:3127
+102:0:4533
+103:2:3315
+104:0:4533
+105:2:4268
+106:2:4269
+107:2:4273
+108:2:4277
+109:2:4278
+110:2:4282
+111:2:4287
+112:2:4295
+113:2:4299
+114:2:4300
+115:2:4295
+116:2:4299
+117:2:4300
+118:2:4304
+119:2:4311
+120:2:4318
+121:2:4319
+122:2:4326
+123:2:4331
+124:2:4338
+125:2:4339
+126:2:4338
+127:2:4339
+128:2:4346
+129:2:4350
+130:0:4533
+131:2:3317
+132:2:4249
+133:0:4533
+134:2:3127
+135:0:4533
+136:2:3318
+137:0:4533
+138:2:3127
+139:0:4533
+140:2:3321
+141:2:3322
+142:2:3326
+143:2:3327
+144:2:3335
+145:2:3336
+146:2:3340
+147:2:3341
+148:2:3349
+149:2:3354
+150:2:3358
+151:2:3359
+152:2:3367
+153:2:3368
+154:2:3372
+155:2:3373
+156:2:3367
+157:2:3368
+158:2:3372
+159:2:3373
+160:2:3381
+161:2:3386
+162:2:3387
+163:2:3398
+164:2:3399
+165:2:3400
+166:2:3411
+167:2:3416
+168:2:3417
+169:2:3428
+170:2:3429
+171:2:3430
+172:2:3428
+173:2:3429
+174:2:3430
+175:2:3441
+176:2:3448
+177:0:4533
+178:2:3127
+179:0:4533
+180:2:3452
+181:2:3453
+182:2:3454
+183:2:3466
+184:2:3467
+185:2:3471
+186:2:3472
+187:2:3480
+188:2:3485
+189:2:3489
+190:2:3490
+191:2:3498
+192:2:3499
+193:2:3503
+194:2:3504
+195:2:3498
+196:2:3499
+197:2:3503
+198:2:3504
+199:2:3512
+200:2:3517
+201:2:3518
+202:2:3529
+203:2:3530
+204:2:3531
+205:2:3542
+206:2:3547
+207:2:3548
+208:2:3559
+209:2:3560
+210:2:3561
+211:2:3559
+212:2:3560
+213:2:3561
+214:2:3572
+215:2:3583
+216:2:3584
+217:0:4533
+218:2:3127
+219:0:4533
+220:2:3715
+221:2:3716
+222:2:3720
+223:2:3721
+224:2:3729
+225:2:3730
+226:2:3734
+227:2:3735
+228:2:3743
+229:2:3748
+230:2:3752
+231:2:3753
+232:2:3761
+233:2:3762
+234:2:3766
+235:2:3767
+236:2:3761
+237:2:3762
+238:2:3766
+239:2:3767
+240:2:3775
+241:2:3780
+242:2:3781
+243:2:3792
+244:2:3793
+245:2:3794
+246:2:3805
+247:2:3810
+248:2:3811
+249:2:3822
+250:2:3823
+251:2:3824
+252:2:3822
+253:2:3823
+254:2:3824
+255:2:3835
+256:0:4533
+257:2:3127
+258:0:4533
+259:2:3844
+260:2:3845
+261:2:3849
+262:2:3850
+263:2:3858
+264:2:3859
+265:2:3863
+266:2:3864
+267:2:3872
+268:2:3877
+269:2:3881
+270:2:3882
+271:2:3890
+272:2:3891
+273:2:3895
+274:2:3896
+275:2:3890
+276:2:3891
+277:2:3895
+278:2:3896
+279:2:3904
+280:2:3909
+281:2:3910
+282:2:3921
+283:2:3922
+284:2:3923
+285:2:3934
+286:2:3939
+287:2:3940
+288:2:3951
+289:2:3952
+290:2:3953
+291:2:3951
+292:2:3952
+293:2:3953
+294:2:3964
+295:2:3971
+296:0:4533
+297:2:3127
+298:0:4533
+299:1:2
+300:0:4533
+301:1:8
+302:0:4533
+303:1:9
+304:0:4533
+305:1:10
+306:0:4533
+307:1:11
+308:0:4533
+309:1:12
+310:1:13
+311:1:17
+312:1:18
+313:1:26
+314:1:27
+315:1:31
+316:1:32
+317:1:40
+318:1:45
+319:1:49
+320:1:50
+321:1:58
+322:1:59
+323:1:63
+324:1:64
+325:1:58
+326:1:59
+327:1:63
+328:1:64
+329:1:72
+330:1:77
+331:1:78
+332:1:89
+333:1:90
+334:1:91
+335:1:102
+336:1:107
+337:1:108
+338:1:119
+339:1:120
+340:1:121
+341:1:119
+342:1:120
+343:1:121
+344:1:132
+345:0:4533
+346:1:11
+347:0:4533
+348:1:141
+349:1:142
+350:0:4533
+351:1:11
+352:0:4533
+353:1:148
+354:1:149
+355:1:153
+356:1:154
+357:1:162
+358:1:163
+359:1:167
+360:1:168
+361:1:176
+362:1:181
+363:1:185
+364:1:186
+365:1:194
+366:1:195
+367:1:199
+368:1:200
+369:1:194
+370:1:195
+371:1:199
+372:1:200
+373:1:208
+374:1:213
+375:1:214
+376:1:225
+377:1:226
+378:1:227
+379:1:238
+380:1:243
+381:1:244
+382:1:255
+383:1:256
+384:1:257
+385:1:255
+386:1:256
+387:1:257
+388:1:268
+389:0:4533
+390:1:11
+391:0:4533
+392:1:277
+393:1:278
+394:1:282
+395:1:283
+396:1:291
+397:1:292
+398:1:296
+399:1:297
+400:1:305
+401:1:310
+402:1:314
+403:1:315
+404:1:323
+405:1:324
+406:1:328
+407:1:329
+408:1:323
+409:1:324
+410:1:328
+411:1:329
+412:1:337
+413:1:342
+414:1:343
+415:1:354
+416:1:355
+417:1:356
+418:1:367
+419:1:372
+420:1:373
+421:1:384
+422:1:385
+423:1:386
+424:1:384
+425:1:385
+426:1:386
+427:1:397
+428:1:404
+429:0:4533
+430:1:11
+431:0:4533
+432:1:540
+433:1:544
+434:1:545
+435:1:549
+436:1:550
+437:1:558
+438:1:566
+439:1:567
+440:1:571
+441:1:575
+442:1:576
+443:1:571
+444:1:575
+445:1:576
+446:1:580
+447:1:587
+448:1:594
+449:1:595
+450:1:602
+451:1:607
+452:1:614
+453:1:615
+454:1:614
+455:1:615
+456:1:622
+457:0:4533
+458:1:11
+459:0:4533
+460:2:3975
+461:2:3976
+462:2:3977
+463:2:3989
+464:2:3990
+465:2:3994
+466:2:3995
+467:2:4003
+468:2:4008
+469:2:4012
+470:2:4013
+471:2:4021
+472:2:4022
+473:2:4026
+474:2:4027
+475:2:4021
+476:2:4022
+477:2:4026
+478:2:4027
+479:2:4035
+480:2:4040
+481:2:4041
+482:2:4052
+483:2:4053
+484:2:4054
+485:2:4065
+486:2:4070
+487:2:4071
+488:2:4082
+489:2:4083
+490:2:4084
+491:2:4082
+492:2:4083
+493:2:4084
+494:2:4095
+495:2:4103
+496:0:4533
+497:2:3127
+498:0:4533
+499:2:4109
+500:2:4110
+501:2:4114
+502:2:4115
+503:2:4123
+504:2:4124
+505:2:4128
+506:2:4129
+507:2:4137
+508:2:4142
+509:2:4146
+510:2:4147
+511:2:4155
+512:2:4156
+513:2:4160
+514:2:4161
+515:2:4155
+516:2:4156
+517:2:4160
+518:2:4161
+519:2:4169
+520:2:4174
+521:2:4175
+522:2:4186
+523:2:4187
+524:2:4188
+525:2:4199
+526:2:4204
+527:2:4205
+528:2:4216
+529:2:4217
+530:2:4218
+531:2:4216
+532:2:4217
+533:2:4218
+534:2:4229
+535:0:4533
+536:2:3127
+537:0:4533
+538:1:632
+539:1:633
+540:1:637
+541:1:638
+542:1:646
+543:1:647
+544:1:651
+545:1:652
+546:1:660
+547:1:665
+548:1:669
+549:1:670
+550:1:678
+551:1:679
+552:1:683
+553:1:684
+554:1:678
+555:1:679
+556:1:683
+557:1:684
+558:1:692
+559:1:697
+560:1:698
+561:1:709
+562:1:710
+563:1:711
+564:1:722
+565:1:727
+566:1:728
+567:1:739
+568:1:740
+569:1:741
+570:1:739
+571:1:740
+572:1:741
+573:1:752
+574:0:4533
+575:1:11
+576:0:4533
+577:2:3975
+578:2:3976
+579:2:3980
+580:2:3981
+581:2:3989
+582:2:3990
+583:2:3994
+584:2:3995
+585:2:4003
+586:2:4008
+587:2:4012
+588:2:4013
+589:2:4021
+590:2:4022
+591:2:4026
+592:2:4027
+593:2:4021
+594:2:4022
+595:2:4026
+596:2:4027
+597:2:4035
+598:2:4040
+599:2:4041
+600:2:4052
+601:2:4053
+602:2:4054
+603:2:4065
+604:2:4070
+605:2:4071
+606:2:4082
+607:2:4083
+608:2:4084
+609:2:4082
+610:2:4083
+611:2:4084
+612:2:4095
+613:2:4103
+614:0:4533
+615:2:3127
+616:0:4533
+617:2:4109
+618:2:4110
+619:2:4114
+620:2:4115
+621:2:4123
+622:2:4124
+623:2:4128
+624:2:4129
+625:2:4137
+626:2:4142
+627:2:4146
+628:2:4147
+629:2:4155
+630:2:4156
+631:2:4160
+632:2:4161
+633:2:4155
+634:2:4156
+635:2:4160
+636:2:4161
+637:2:4169
+638:2:4174
+639:2:4175
+640:2:4186
+641:2:4187
+642:2:4188
+643:2:4199
+644:2:4204
+645:2:4205
+646:2:4216
+647:2:4217
+648:2:4218
+649:2:4216
+650:2:4217
+651:2:4218
+652:2:4229
+653:0:4533
+654:2:3127
+655:0:4533
+656:1:761
+657:1:764
+658:1:765
+659:0:4533
+660:1:11
+661:0:4533
+662:2:3975
+663:2:3976
+664:2:3980
+665:2:3981
+666:2:3989
+667:2:3990
+668:2:3994
+669:2:3995
+670:2:4003
+671:2:4008
+672:2:4012
+673:2:4013
+674:2:4021
+675:2:4022
+676:2:4026
+677:2:4027
+678:2:4021
+679:2:4022
+680:2:4026
+681:2:4027
+682:2:4035
+683:2:4040
+684:2:4041
+685:2:4052
+686:2:4053
+687:2:4054
+688:2:4065
+689:2:4070
+690:2:4071
+691:2:4082
+692:2:4083
+693:2:4084
+694:2:4082
+695:2:4083
+696:2:4084
+697:2:4095
+698:2:4103
+699:0:4533
+700:2:3127
+701:0:4533
+702:2:4109
+703:2:4110
+704:2:4114
+705:2:4115
+706:2:4123
+707:2:4124
+708:2:4128
+709:2:4129
+710:2:4137
+711:2:4142
+712:2:4146
+713:2:4147
+714:2:4155
+715:2:4156
+716:2:4160
+717:2:4161
+718:2:4155
+719:2:4156
+720:2:4160
+721:2:4161
+722:2:4169
+723:2:4174
+724:2:4175
+725:2:4186
+726:2:4187
+727:2:4188
+728:2:4199
+729:2:4204
+730:2:4205
+731:2:4216
+732:2:4217
+733:2:4218
+734:2:4216
+735:2:4217
+736:2:4218
+737:2:4229
+738:0:4533
+739:2:3127
+740:0:4533
+741:1:768
+742:1:769
+743:1:773
+744:1:774
+745:1:782
+746:1:783
+747:1:787
+748:1:788
+749:1:796
+750:1:801
+751:1:805
+752:1:806
+753:1:814
+754:1:815
+755:1:819
+756:1:820
+757:1:814
+758:1:815
+759:1:819
+760:1:820
+761:1:828
+762:1:833
+763:1:834
+764:1:845
+765:1:846
+766:1:847
+767:1:858
+768:1:863
+769:1:864
+770:1:875
+771:1:876
+772:1:877
+773:1:875
+774:1:876
+775:1:877
+776:1:888
+777:0:4533
+778:1:11
+779:0:4533
+780:2:3975
+781:2:3976
+782:2:3980
+783:2:3981
+784:2:3989
+785:2:3990
+786:2:3994
+787:2:3995
+788:2:4003
+789:2:4008
+790:2:4012
+791:2:4013
+792:2:4021
+793:2:4022
+794:2:4026
+795:2:4027
+796:2:4021
+797:2:4022
+798:2:4026
+799:2:4027
+800:2:4035
+801:2:4040
+802:2:4041
+803:2:4052
+804:2:4053
+805:2:4054
+806:2:4065
+807:2:4070
+808:2:4071
+809:2:4082
+810:2:4083
+811:2:4084
+812:2:4082
+813:2:4083
+814:2:4084
+815:2:4095
+816:2:4103
+817:0:4533
+818:2:3127
+819:0:4533
+820:2:4109
+821:2:4110
+822:2:4114
+823:2:4115
+824:2:4123
+825:2:4124
+826:2:4128
+827:2:4129
+828:2:4137
+829:2:4142
+830:2:4146
+831:2:4147
+832:2:4155
+833:2:4156
+834:2:4160
+835:2:4161
+836:2:4155
+837:2:4156
+838:2:4160
+839:2:4161
+840:2:4169
+841:2:4174
+842:2:4175
+843:2:4186
+844:2:4187
+845:2:4188
+846:2:4199
+847:2:4204
+848:2:4205
+849:2:4216
+850:2:4217
+851:2:4218
+852:2:4216
+853:2:4217
+854:2:4218
+855:2:4229
+856:0:4533
+857:2:3127
+858:0:4533
+859:1:1028
+860:1:1029
+861:1:1033
+862:1:1034
+863:1:1042
+864:1:1043
+865:1:1047
+866:1:1048
+867:1:1056
+868:1:1061
+869:1:1065
+870:1:1066
+871:1:1074
+872:1:1075
+873:1:1079
+874:1:1080
+875:1:1074
+876:1:1075
+877:1:1079
+878:1:1080
+879:1:1088
+880:1:1093
+881:1:1094
+882:1:1105
+883:1:1106
+884:1:1107
+885:1:1118
+886:1:1123
+887:1:1124
+888:1:1135
+889:1:1136
+890:1:1137
+891:1:1135
+892:1:1136
+893:1:1137
+894:1:1148
+895:1:1155
+896:1:1159
+897:0:4533
+898:1:11
+899:0:4533
+900:2:3975
+901:2:3976
+902:2:3980
+903:2:3981
+904:2:3989
+905:2:3990
+906:2:3994
+907:2:3995
+908:2:4003
+909:2:4008
+910:2:4012
+911:2:4013
+912:2:4021
+913:2:4022
+914:2:4026
+915:2:4027
+916:2:4021
+917:2:4022
+918:2:4026
+919:2:4027
+920:2:4035
+921:2:4040
+922:2:4041
+923:2:4052
+924:2:4053
+925:2:4054
+926:2:4065
+927:2:4070
+928:2:4071
+929:2:4082
+930:2:4083
+931:2:4084
+932:2:4082
+933:2:4083
+934:2:4084
+935:2:4095
+936:2:4103
+937:0:4533
+938:2:3127
+939:0:4533
+940:2:4109
+941:2:4110
+942:2:4114
+943:2:4115
+944:2:4123
+945:2:4124
+946:2:4128
+947:2:4129
+948:2:4137
+949:2:4142
+950:2:4146
+951:2:4147
+952:2:4155
+953:2:4156
+954:2:4160
+955:2:4161
+956:2:4155
+957:2:4156
+958:2:4160
+959:2:4161
+960:2:4169
+961:2:4174
+962:2:4175
+963:2:4186
+964:2:4187
+965:2:4188
+966:2:4199
+967:2:4204
+968:2:4205
+969:2:4216
+970:2:4217
+971:2:4218
+972:2:4216
+973:2:4217
+974:2:4218
+975:2:4229
+976:0:4533
+977:2:3127
+978:0:4533
+979:1:1160
+980:1:1161
+981:1:1165
+982:1:1166
+983:1:1174
+984:1:1175
+985:1:1176
+986:1:1188
+987:1:1193
+988:1:1197
+989:1:1198
+990:1:1206
+991:1:1207
+992:1:1211
+993:1:1212
+994:1:1206
+995:1:1207
+996:1:1211
+997:1:1212
+998:1:1220
+999:1:1225
+1000:1:1226
+1001:1:1237
+1002:1:1238
+1003:1:1239
+1004:1:1250
+1005:1:1255
+1006:1:1256
+1007:1:1267
+1008:1:1268
+1009:1:1269
+1010:1:1267
+1011:1:1268
+1012:1:1269
+1013:1:1280
+1014:0:4533
+1015:1:11
+1016:0:4533
+1017:2:3975
+1018:2:3976
+1019:2:3980
+1020:2:3981
+1021:2:3989
+1022:2:3990
+1023:2:3994
+1024:2:3995
+1025:2:4003
+1026:2:4008
+1027:2:4012
+1028:2:4013
+1029:2:4021
+1030:2:4022
+1031:2:4026
+1032:2:4027
+1033:2:4021
+1034:2:4022
+1035:2:4026
+1036:2:4027
+1037:2:4035
+1038:2:4040
+1039:2:4041
+1040:2:4052
+1041:2:4053
+1042:2:4054
+1043:2:4065
+1044:2:4070
+1045:2:4071
+1046:2:4082
+1047:2:4083
+1048:2:4084
+1049:2:4082
+1050:2:4083
+1051:2:4084
+1052:2:4095
+1053:2:4103
+1054:0:4533
+1055:2:3127
+1056:0:4533
+1057:2:4109
+1058:2:4110
+1059:2:4114
+1060:2:4115
+1061:2:4123
+1062:2:4124
+1063:2:4128
+1064:2:4129
+1065:2:4137
+1066:2:4142
+1067:2:4146
+1068:2:4147
+1069:2:4155
+1070:2:4156
+1071:2:4160
+1072:2:4161
+1073:2:4155
+1074:2:4156
+1075:2:4160
+1076:2:4161
+1077:2:4169
+1078:2:4174
+1079:2:4175
+1080:2:4186
+1081:2:4187
+1082:2:4188
+1083:2:4199
+1084:2:4204
+1085:2:4205
+1086:2:4216
+1087:2:4217
+1088:2:4218
+1089:2:4216
+1090:2:4217
+1091:2:4218
+1092:2:4229
+1093:0:4533
+1094:2:3127
+1095:0:4533
+1096:1:1289
+1097:0:4533
+1098:2:3975
+1099:2:3976
+1100:2:3980
+1101:2:3981
+1102:2:3989
+1103:2:3990
+1104:2:3994
+1105:2:3995
+1106:2:4003
+1107:2:4008
+1108:2:4012
+1109:2:4013
+1110:2:4021
+1111:2:4022
+1112:2:4026
+1113:2:4027
+1114:2:4021
+1115:2:4022
+1116:2:4026
+1117:2:4027
+1118:2:4035
+1119:2:4040
+1120:2:4041
+1121:2:4052
+1122:2:4053
+1123:2:4054
+1124:2:4065
+1125:2:4070
+1126:2:4071
+1127:2:4082
+1128:2:4083
+1129:2:4084
+1130:2:4082
+1131:2:4083
+1132:2:4084
+1133:2:4095
+1134:2:4103
+1135:0:4533
+1136:2:3127
+1137:0:4533
+1138:2:4109
+1139:2:4110
+1140:2:4114
+1141:2:4115
+1142:2:4123
+1143:2:4124
+1144:2:4128
+1145:2:4129
+1146:2:4137
+1147:2:4142
+1148:2:4146
+1149:2:4147
+1150:2:4155
+1151:2:4156
+1152:2:4160
+1153:2:4161
+1154:2:4155
+1155:2:4156
+1156:2:4160
+1157:2:4161
+1158:2:4169
+1159:2:4174
+1160:2:4175
+1161:2:4186
+1162:2:4187
+1163:2:4188
+1164:2:4199
+1165:2:4204
+1166:2:4205
+1167:2:4216
+1168:2:4217
+1169:2:4218
+1170:2:4216
+1171:2:4217
+1172:2:4218
+1173:2:4229
+1174:0:4533
+1175:2:3127
+1176:0:4533
+1177:1:3023
+1178:1:3030
+1179:1:3031
+1180:1:3038
+1181:1:3043
+1182:1:3050
+1183:1:3051
+1184:1:3050
+1185:1:3051
+1186:1:3058
+1187:1:3062
+1188:0:4533
+1189:2:3975
+1190:2:3976
+1191:2:3980
+1192:2:3981
+1193:2:3989
+1194:2:3990
+1195:2:3994
+1196:2:3995
+1197:2:4003
+1198:2:4008
+1199:2:4012
+1200:2:4013
+1201:2:4021
+1202:2:4022
+1203:2:4026
+1204:2:4027
+1205:2:4021
+1206:2:4022
+1207:2:4026
+1208:2:4027
+1209:2:4035
+1210:2:4040
+1211:2:4041
+1212:2:4052
+1213:2:4053
+1214:2:4054
+1215:2:4065
+1216:2:4070
+1217:2:4071
+1218:2:4082
+1219:2:4083
+1220:2:4084
+1221:2:4082
+1222:2:4083
+1223:2:4084
+1224:2:4095
+1225:2:4103
+1226:0:4533
+1227:2:3127
+1228:0:4533
+1229:2:4109
+1230:2:4110
+1231:2:4114
+1232:2:4115
+1233:2:4123
+1234:2:4124
+1235:2:4128
+1236:2:4129
+1237:2:4137
+1238:2:4142
+1239:2:4146
+1240:2:4147
+1241:2:4155
+1242:2:4156
+1243:2:4160
+1244:2:4161
+1245:2:4155
+1246:2:4156
+1247:2:4160
+1248:2:4161
+1249:2:4169
+1250:2:4174
+1251:2:4175
+1252:2:4186
+1253:2:4187
+1254:2:4188
+1255:2:4199
+1256:2:4204
+1257:2:4205
+1258:2:4216
+1259:2:4217
+1260:2:4218
+1261:2:4216
+1262:2:4217
+1263:2:4218
+1264:2:4229
+1265:0:4533
+1266:2:3127
+1267:0:4533
+1268:1:1291
+1269:1:1292
+1270:0:4533
+1271:1:11
+1272:0:4533
+1273:2:3975
+1274:2:3976
+1275:2:3980
+1276:2:3981
+1277:2:3989
+1278:2:3990
+1279:2:3994
+1280:2:3995
+1281:2:4003
+1282:2:4008
+1283:2:4012
+1284:2:4013
+1285:2:4021
+1286:2:4022
+1287:2:4026
+1288:2:4027
+1289:2:4021
+1290:2:4022
+1291:2:4026
+1292:2:4027
+1293:2:4035
+1294:2:4040
+1295:2:4041
+1296:2:4052
+1297:2:4053
+1298:2:4054
+1299:2:4065
+1300:2:4070
+1301:2:4071
+1302:2:4082
+1303:2:4083
+1304:2:4084
+1305:2:4082
+1306:2:4083
+1307:2:4084
+1308:2:4095
+1309:2:4103
+1310:0:4533
+1311:2:3127
+1312:0:4533
+1313:2:4109
+1314:2:4110
+1315:2:4114
+1316:2:4115
+1317:2:4123
+1318:2:4124
+1319:2:4128
+1320:2:4129
+1321:2:4137
+1322:2:4142
+1323:2:4146
+1324:2:4147
+1325:2:4155
+1326:2:4156
+1327:2:4160
+1328:2:4161
+1329:2:4155
+1330:2:4156
+1331:2:4160
+1332:2:4161
+1333:2:4169
+1334:2:4174
+1335:2:4175
+1336:2:4186
+1337:2:4187
+1338:2:4188
+1339:2:4199
+1340:2:4204
+1341:2:4205
+1342:2:4216
+1343:2:4217
+1344:2:4218
+1345:2:4216
+1346:2:4217
+1347:2:4218
+1348:2:4229
+1349:0:4533
+1350:2:3127
+1351:0:4533
+1352:1:1293
+1353:1:1294
+1354:1:1298
+1355:1:1299
+1356:1:1307
+1357:1:1308
+1358:1:1312
+1359:1:1313
+1360:1:1321
+1361:1:1326
+1362:1:1330
+1363:1:1331
+1364:1:1339
+1365:1:1340
+1366:1:1344
+1367:1:1345
+1368:1:1339
+1369:1:1340
+1370:1:1344
+1371:1:1345
+1372:1:1353
+1373:1:1358
+1374:1:1359
+1375:1:1370
+1376:1:1371
+1377:1:1372
+1378:1:1383
+1379:1:1388
+1380:1:1389
+1381:1:1400
+1382:1:1401
+1383:1:1402
+1384:1:1400
+1385:1:1401
+1386:1:1402
+1387:1:1413
+1388:0:4533
+1389:1:11
+1390:0:4533
+1391:2:3975
+1392:2:3976
+1393:2:3980
+1394:2:3981
+1395:2:3989
+1396:2:3990
+1397:2:3994
+1398:2:3995
+1399:2:4003
+1400:2:4008
+1401:2:4012
+1402:2:4013
+1403:2:4021
+1404:2:4022
+1405:2:4026
+1406:2:4027
+1407:2:4021
+1408:2:4022
+1409:2:4026
+1410:2:4027
+1411:2:4035
+1412:2:4040
+1413:2:4041
+1414:2:4052
+1415:2:4053
+1416:2:4054
+1417:2:4065
+1418:2:4070
+1419:2:4071
+1420:2:4082
+1421:2:4083
+1422:2:4084
+1423:2:4082
+1424:2:4083
+1425:2:4084
+1426:2:4095
+1427:2:4103
+1428:0:4533
+1429:2:3127
+1430:0:4533
+1431:2:4109
+1432:2:4110
+1433:2:4114
+1434:2:4115
+1435:2:4123
+1436:2:4124
+1437:2:4128
+1438:2:4129
+1439:2:4137
+1440:2:4142
+1441:2:4146
+1442:2:4147
+1443:2:4155
+1444:2:4156
+1445:2:4160
+1446:2:4161
+1447:2:4155
+1448:2:4156
+1449:2:4160
+1450:2:4161
+1451:2:4169
+1452:2:4174
+1453:2:4175
+1454:2:4186
+1455:2:4187
+1456:2:4188
+1457:2:4199
+1458:2:4204
+1459:2:4205
+1460:2:4216
+1461:2:4217
+1462:2:4218
+1463:2:4216
+1464:2:4217
+1465:2:4218
+1466:2:4229
+1467:0:4533
+1468:2:3127
+1469:0:4533
+1470:1:1422
+1471:1:1423
+1472:1:1427
+1473:1:1428
+1474:1:1436
+1475:1:1437
+1476:1:1441
+1477:1:1442
+1478:1:1450
+1479:1:1455
+1480:1:1459
+1481:1:1460
+1482:1:1468
+1483:1:1469
+1484:1:1473
+1485:1:1474
+1486:1:1468
+1487:1:1469
+1488:1:1473
+1489:1:1474
+1490:1:1482
+1491:1:1487
+1492:1:1488
+1493:1:1499
+1494:1:1500
+1495:1:1501
+1496:1:1512
+1497:1:1517
+1498:1:1518
+1499:1:1529
+1500:1:1530
+1501:1:1531
+1502:1:1529
+1503:1:1530
+1504:1:1531
+1505:1:1542
+1506:1:1549
+1507:1:1553
+1508:0:4533
+1509:1:11
+1510:0:4533
+1511:2:3975
+1512:2:3976
+1513:2:3980
+1514:2:3981
+1515:2:3989
+1516:2:3990
+1517:2:3994
+1518:2:3995
+1519:2:4003
+1520:2:4008
+1521:2:4012
+1522:2:4013
+1523:2:4021
+1524:2:4022
+1525:2:4026
+1526:2:4027
+1527:2:4021
+1528:2:4022
+1529:2:4026
+1530:2:4027
+1531:2:4035
+1532:2:4040
+1533:2:4041
+1534:2:4052
+1535:2:4053
+1536:2:4054
+1537:2:4065
+1538:2:4070
+1539:2:4071
+1540:2:4082
+1541:2:4083
+1542:2:4084
+1543:2:4082
+1544:2:4083
+1545:2:4084
+1546:2:4095
+1547:2:4103
+1548:0:4533
+1549:2:3127
+1550:0:4533
+1551:2:4109
+1552:2:4110
+1553:2:4114
+1554:2:4115
+1555:2:4123
+1556:2:4124
+1557:2:4128
+1558:2:4129
+1559:2:4137
+1560:2:4142
+1561:2:4146
+1562:2:4147
+1563:2:4155
+1564:2:4156
+1565:2:4160
+1566:2:4161
+1567:2:4155
+1568:2:4156
+1569:2:4160
+1570:2:4161
+1571:2:4169
+1572:2:4174
+1573:2:4175
+1574:2:4186
+1575:2:4187
+1576:2:4188
+1577:2:4199
+1578:2:4204
+1579:2:4205
+1580:2:4216
+1581:2:4217
+1582:2:4218
+1583:2:4216
+1584:2:4217
+1585:2:4218
+1586:2:4229
+1587:0:4533
+1588:2:3127
+1589:0:4533
+1590:1:1554
+1591:1:1558
+1592:1:1559
+1593:1:1563
+1594:1:1564
+1595:1:1572
+1596:1:1580
+1597:1:1581
+1598:1:1585
+1599:1:1589
+1600:1:1590
+1601:1:1585
+1602:1:1589
+1603:1:1590
+1604:1:1594
+1605:1:1601
+1606:1:1608
+1607:1:1609
+1608:1:1616
+1609:1:1621
+1610:1:1628
+1611:1:1629
+1612:1:1628
+1613:1:1629
+1614:1:1636
+1615:0:4533
+1616:1:11
+1617:0:4533
+1618:2:3975
+1619:2:3976
+1620:2:3980
+1621:2:3981
+1622:2:3989
+1623:2:3990
+1624:2:3994
+1625:2:3995
+1626:2:4003
+1627:2:4008
+1628:2:4012
+1629:2:4013
+1630:2:4021
+1631:2:4022
+1632:2:4026
+1633:2:4027
+1634:2:4021
+1635:2:4022
+1636:2:4026
+1637:2:4027
+1638:2:4035
+1639:2:4040
+1640:2:4041
+1641:2:4052
+1642:2:4053
+1643:2:4054
+1644:2:4065
+1645:2:4070
+1646:2:4071
+1647:2:4082
+1648:2:4083
+1649:2:4084
+1650:2:4082
+1651:2:4083
+1652:2:4084
+1653:2:4095
+1654:2:4103
+1655:0:4533
+1656:2:3127
+1657:0:4533
+1658:2:4109
+1659:2:4110
+1660:2:4114
+1661:2:4115
+1662:2:4123
+1663:2:4124
+1664:2:4128
+1665:2:4129
+1666:2:4137
+1667:2:4142
+1668:2:4146
+1669:2:4147
+1670:2:4155
+1671:2:4156
+1672:2:4160
+1673:2:4161
+1674:2:4155
+1675:2:4156
+1676:2:4160
+1677:2:4161
+1678:2:4169
+1679:2:4174
+1680:2:4175
+1681:2:4186
+1682:2:4187
+1683:2:4188
+1684:2:4199
+1685:2:4204
+1686:2:4205
+1687:2:4216
+1688:2:4217
+1689:2:4218
+1690:2:4216
+1691:2:4217
+1692:2:4218
+1693:2:4229
+1694:0:4533
+1695:2:3127
+1696:0:4533
+1697:1:1646
+1698:1:1647
+1699:1:1651
+1700:1:1652
+1701:1:1660
+1702:1:1661
+1703:1:1665
+1704:1:1666
+1705:1:1674
+1706:1:1679
+1707:1:1683
+1708:1:1684
+1709:1:1692
+1710:1:1693
+1711:1:1697
+1712:1:1698
+1713:1:1692
+1714:1:1693
+1715:1:1697
+1716:1:1698
+1717:1:1706
+1718:1:1711
+1719:1:1712
+1720:1:1723
+1721:1:1724
+1722:1:1725
+1723:1:1736
+1724:1:1741
+1725:1:1742
+1726:1:1753
+1727:1:1754
+1728:1:1755
+1729:1:1753
+1730:1:1754
+1731:1:1755
+1732:1:1766
+1733:0:4533
+1734:1:11
+1735:0:4533
+1736:2:3975
+1737:2:3976
+1738:2:3980
+1739:2:3981
+1740:2:3989
+1741:2:3990
+1742:2:3994
+1743:2:3995
+1744:2:4003
+1745:2:4008
+1746:2:4012
+1747:2:4013
+1748:2:4021
+1749:2:4022
+1750:2:4026
+1751:2:4027
+1752:2:4021
+1753:2:4022
+1754:2:4026
+1755:2:4027
+1756:2:4035
+1757:2:4040
+1758:2:4041
+1759:2:4052
+1760:2:4053
+1761:2:4054
+1762:2:4065
+1763:2:4070
+1764:2:4071
+1765:2:4082
+1766:2:4083
+1767:2:4084
+1768:2:4082
+1769:2:4083
+1770:2:4084
+1771:2:4095
+1772:2:4103
+1773:0:4533
+1774:2:3127
+1775:0:4533
+1776:2:4109
+1777:2:4110
+1778:2:4114
+1779:2:4115
+1780:2:4123
+1781:2:4124
+1782:2:4128
+1783:2:4129
+1784:2:4137
+1785:2:4142
+1786:2:4146
+1787:2:4147
+1788:2:4155
+1789:2:4156
+1790:2:4160
+1791:2:4161
+1792:2:4155
+1793:2:4156
+1794:2:4160
+1795:2:4161
+1796:2:4169
+1797:2:4174
+1798:2:4175
+1799:2:4186
+1800:2:4187
+1801:2:4188
+1802:2:4199
+1803:2:4204
+1804:2:4205
+1805:2:4216
+1806:2:4217
+1807:2:4218
+1808:2:4216
+1809:2:4217
+1810:2:4218
+1811:2:4229
+1812:0:4533
+1813:2:3127
+1814:0:4533
+1815:1:1775
+1816:1:1776
+1817:1:1780
+1818:1:1781
+1819:1:1789
+1820:1:1790
+1821:1:1794
+1822:1:1795
+1823:1:1803
+1824:1:1808
+1825:1:1812
+1826:1:1813
+1827:1:1821
+1828:1:1822
+1829:1:1826
+1830:1:1827
+1831:1:1821
+1832:1:1822
+1833:1:1826
+1834:1:1827
+1835:1:1835
+1836:1:1840
+1837:1:1841
+1838:1:1852
+1839:1:1853
+1840:1:1854
+1841:1:1865
+1842:1:1870
+1843:1:1871
+1844:1:1882
+1845:1:1883
+1846:1:1884
+1847:1:1882
+1848:1:1883
+1849:1:1884
+1850:1:1895
+1851:1:1902
+1852:1:1906
+1853:0:4533
+1854:1:11
+1855:0:4533
+1856:2:3975
+1857:2:3976
+1858:2:3980
+1859:2:3981
+1860:2:3989
+1861:2:3990
+1862:2:3994
+1863:2:3995
+1864:2:4003
+1865:2:4008
+1866:2:4012
+1867:2:4013
+1868:2:4021
+1869:2:4022
+1870:2:4026
+1871:2:4027
+1872:2:4021
+1873:2:4022
+1874:2:4026
+1875:2:4027
+1876:2:4035
+1877:2:4040
+1878:2:4041
+1879:2:4052
+1880:2:4053
+1881:2:4054
+1882:2:4065
+1883:2:4070
+1884:2:4071
+1885:2:4082
+1886:2:4083
+1887:2:4084
+1888:2:4082
+1889:2:4083
+1890:2:4084
+1891:2:4095
+1892:2:4103
+1893:0:4533
+1894:2:3127
+1895:0:4533
+1896:2:4109
+1897:2:4110
+1898:2:4114
+1899:2:4115
+1900:2:4123
+1901:2:4124
+1902:2:4128
+1903:2:4129
+1904:2:4137
+1905:2:4142
+1906:2:4146
+1907:2:4147
+1908:2:4155
+1909:2:4156
+1910:2:4160
+1911:2:4161
+1912:2:4155
+1913:2:4156
+1914:2:4160
+1915:2:4161
+1916:2:4169
+1917:2:4174
+1918:2:4175
+1919:2:4186
+1920:2:4187
+1921:2:4188
+1922:2:4199
+1923:2:4204
+1924:2:4205
+1925:2:4216
+1926:2:4217
+1927:2:4218
+1928:2:4216
+1929:2:4217
+1930:2:4218
+1931:2:4229
+1932:0:4533
+1933:2:3127
+1934:0:4533
+1935:1:1907
+1936:1:1908
+1937:1:1912
+1938:1:1913
+1939:1:1921
+1940:1:1922
+1941:1:1923
+1942:1:1935
+1943:1:1940
+1944:1:1944
+1945:1:1945
+1946:1:1953
+1947:1:1954
+1948:1:1958
+1949:1:1959
+1950:1:1953
+1951:1:1954
+1952:1:1958
+1953:1:1959
+1954:1:1967
+1955:1:1972
+1956:1:1973
+1957:1:1984
+1958:1:1985
+1959:1:1986
+1960:1:1997
+1961:1:2002
+1962:1:2003
+1963:1:2014
+1964:1:2015
+1965:1:2016
+1966:1:2014
+1967:1:2015
+1968:1:2016
+1969:1:2027
+1970:0:4533
+1971:1:11
+1972:0:4533
+1973:2:3975
+1974:2:3976
+1975:2:3980
+1976:2:3981
+1977:2:3989
+1978:2:3990
+1979:2:3994
+1980:2:3995
+1981:2:4003
+1982:2:4008
+1983:2:4012
+1984:2:4013
+1985:2:4021
+1986:2:4022
+1987:2:4026
+1988:2:4027
+1989:2:4021
+1990:2:4022
+1991:2:4026
+1992:2:4027
+1993:2:4035
+1994:2:4040
+1995:2:4041
+1996:2:4052
+1997:2:4060
+1998:2:4061
+1999:2:4065
+2000:2:4070
+2001:2:4071
+2002:2:4082
+2003:2:4083
+2004:2:4084
+2005:2:4082
+2006:2:4083
+2007:2:4084
+2008:2:4095
+2009:2:4103
+2010:0:4533
+2011:2:3127
+2012:0:4533
+2013:2:4109
+2014:2:4110
+2015:2:4114
+2016:2:4115
+2017:2:4123
+2018:2:4124
+2019:2:4128
+2020:2:4129
+2021:2:4137
+2022:2:4142
+2023:2:4146
+2024:2:4147
+2025:2:4155
+2026:2:4156
+2027:2:4160
+2028:2:4161
+2029:2:4155
+2030:2:4156
+2031:2:4160
+2032:2:4161
+2033:2:4169
+2034:2:4174
+2035:2:4175
+2036:2:4186
+2037:2:4194
+2038:2:4195
+2039:2:4199
+2040:2:4204
+2041:2:4205
+2042:2:4216
+2043:2:4217
+2044:2:4218
+2045:2:4216
+2046:2:4217
+2047:2:4218
+2048:2:4229
+2049:0:4533
+2050:2:3127
+2051:0:4533
+2052:1:2036
+2053:1:2037
+2054:0:4533
+2055:1:11
+2056:0:4533
+2057:2:3975
+2058:2:3976
+2059:2:3980
+2060:2:3981
+2061:2:3989
+2062:2:3990
+2063:2:3994
+2064:2:3995
+2065:2:4003
+2066:2:4008
+2067:2:4012
+2068:2:4013
+2069:2:4021
+2070:2:4022
+2071:2:4026
+2072:2:4027
+2073:2:4021
+2074:2:4022
+2075:2:4026
+2076:2:4027
+2077:2:4035
+2078:2:4040
+2079:2:4041
+2080:2:4052
+2081:2:4060
+2082:2:4061
+2083:2:4065
+2084:2:4070
+2085:2:4071
+2086:2:4082
+2087:2:4083
+2088:2:4084
+2089:2:4082
+2090:2:4083
+2091:2:4084
+2092:2:4095
+2093:2:4103
+2094:0:4533
+2095:2:3127
+2096:0:4533
+2097:2:4109
+2098:2:4110
+2099:2:4114
+2100:2:4115
+2101:2:4123
+2102:2:4124
+2103:2:4128
+2104:2:4129
+2105:2:4137
+2106:2:4142
+2107:2:4146
+2108:2:4147
+2109:2:4155
+2110:2:4156
+2111:2:4160
+2112:2:4161
+2113:2:4155
+2114:2:4156
+2115:2:4160
+2116:2:4161
+2117:2:4169
+2118:2:4174
+2119:2:4175
+2120:2:4186
+2121:2:4194
+2122:2:4195
+2123:2:4199
+2124:2:4204
+2125:2:4205
+2126:2:4216
+2127:2:4217
+2128:2:4218
+2129:2:4216
+2130:2:4217
+2131:2:4218
+2132:2:4229
+2133:0:4533
+2134:2:3127
+2135:0:4533
+2136:1:2043
+2137:1:2044
+2138:1:2048
+2139:1:2049
+2140:1:2057
+2141:1:2058
+2142:1:2062
+2143:1:2063
+2144:1:2071
+2145:1:2076
+2146:1:2080
+2147:1:2081
+2148:1:2089
+2149:1:2090
+2150:1:2094
+2151:1:2095
+2152:1:2089
+2153:1:2090
+2154:1:2094
+2155:1:2095
+2156:1:2103
+2157:1:2108
+2158:1:2109
+2159:1:2120
+2160:1:2121
+2161:1:2122
+2162:1:2133
+2163:1:2138
+2164:1:2139
+2165:1:2150
+2166:1:2151
+2167:1:2152
+2168:1:2150
+2169:1:2151
+2170:1:2152
+2171:1:2163
+2172:0:4533
+2173:1:11
+2174:0:4533
+2175:2:3975
+2176:2:3976
+2177:2:3980
+2178:2:3981
+2179:2:3989
+2180:2:3990
+2181:2:3994
+2182:2:3995
+2183:2:4003
+2184:2:4008
+2185:2:4012
+2186:2:4013
+2187:2:4021
+2188:2:4022
+2189:2:4026
+2190:2:4027
+2191:2:4021
+2192:2:4022
+2193:2:4026
+2194:2:4027
+2195:2:4035
+2196:2:4040
+2197:2:4041
+2198:2:4052
+2199:2:4060
+2200:2:4061
+2201:2:4065
+2202:2:4070
+2203:2:4071
+2204:2:4082
+2205:2:4083
+2206:2:4084
+2207:2:4082
+2208:2:4083
+2209:2:4084
+2210:2:4095
+2211:2:4103
+2212:0:4533
+2213:2:3127
+2214:0:4533
+2215:2:4109
+2216:2:4110
+2217:2:4114
+2218:2:4115
+2219:2:4123
+2220:2:4124
+2221:2:4128
+2222:2:4129
+2223:2:4137
+2224:2:4142
+2225:2:4146
+2226:2:4147
+2227:2:4155
+2228:2:4156
+2229:2:4160
+2230:2:4161
+2231:2:4155
+2232:2:4156
+2233:2:4160
+2234:2:4161
+2235:2:4169
+2236:2:4174
+2237:2:4175
+2238:2:4186
+2239:2:4194
+2240:2:4195
+2241:2:4199
+2242:2:4204
+2243:2:4205
+2244:2:4216
+2245:2:4217
+2246:2:4218
+2247:2:4216
+2248:2:4217
+2249:2:4218
+2250:2:4229
+2251:0:4533
+2252:2:3127
+2253:0:4533
+2254:1:2172
+2255:1:2173
+2256:1:2177
+2257:1:2178
+2258:1:2186
+2259:1:2187
+2260:1:2191
+2261:1:2192
+2262:1:2200
+2263:1:2205
+2264:1:2209
+2265:1:2210
+2266:1:2218
+2267:1:2219
+2268:1:2223
+2269:1:2224
+2270:1:2218
+2271:1:2219
+2272:1:2223
+2273:1:2224
+2274:1:2232
+2275:1:2237
+2276:1:2238
+2277:1:2249
+2278:1:2250
+2279:1:2251
+2280:1:2262
+2281:1:2267
+2282:1:2268
+2283:1:2279
+2284:1:2280
+2285:1:2281
+2286:1:2279
+2287:1:2280
+2288:1:2281
+2289:1:2292
+2290:1:2299
+2291:0:4533
+2292:1:11
+2293:0:4533
+2294:2:3975
+2295:2:3976
+2296:2:3980
+2297:2:3981
+2298:2:3989
+2299:2:3990
+2300:2:3994
+2301:2:3995
+2302:2:4003
+2303:2:4008
+2304:2:4012
+2305:2:4013
+2306:2:4021
+2307:2:4022
+2308:2:4026
+2309:2:4027
+2310:2:4021
+2311:2:4022
+2312:2:4026
+2313:2:4027
+2314:2:4035
+2315:2:4040
+2316:2:4041
+2317:2:4052
+2318:2:4060
+2319:2:4061
+2320:2:4065
+2321:2:4070
+2322:2:4071
+2323:2:4082
+2324:2:4083
+2325:2:4084
+2326:2:4082
+2327:2:4083
+2328:2:4084
+2329:2:4095
+2330:2:4103
+2331:0:4533
+2332:2:3127
+2333:0:4533
+2334:2:4109
+2335:2:4110
+2336:2:4114
+2337:2:4115
+2338:2:4123
+2339:2:4124
+2340:2:4128
+2341:2:4129
+2342:2:4137
+2343:2:4142
+2344:2:4146
+2345:2:4147
+2346:2:4155
+2347:2:4156
+2348:2:4160
+2349:2:4161
+2350:2:4155
+2351:2:4156
+2352:2:4160
+2353:2:4161
+2354:2:4169
+2355:2:4174
+2356:2:4175
+2357:2:4186
+2358:2:4194
+2359:2:4195
+2360:2:4199
+2361:2:4204
+2362:2:4205
+2363:2:4216
+2364:2:4217
+2365:2:4218
+2366:2:4216
+2367:2:4217
+2368:2:4218
+2369:2:4229
+2370:0:4533
+2371:2:3127
+2372:0:4533
+2373:1:2435
+2374:1:2439
+2375:1:2440
+2376:1:2444
+2377:1:2445
+2378:1:2453
+2379:1:2461
+2380:1:2462
+2381:1:2466
+2382:1:2470
+2383:1:2471
+2384:1:2466
+2385:1:2470
+2386:1:2471
+2387:1:2475
+2388:1:2482
+2389:1:2489
+2390:1:2490
+2391:1:2497
+2392:1:2502
+2393:1:2509
+2394:1:2510
+2395:1:2509
+2396:1:2510
+2397:1:2517
+2398:0:4533
+2399:1:11
+2400:0:4533
+2401:2:3975
+2402:2:3976
+2403:2:3980
+2404:2:3981
+2405:2:3989
+2406:2:3990
+2407:2:3994
+2408:2:3995
+2409:2:4003
+2410:2:4008
+2411:2:4012
+2412:2:4013
+2413:2:4021
+2414:2:4022
+2415:2:4026
+2416:2:4027
+2417:2:4021
+2418:2:4022
+2419:2:4026
+2420:2:4027
+2421:2:4035
+2422:2:4040
+2423:2:4041
+2424:2:4052
+2425:2:4060
+2426:2:4061
+2427:2:4065
+2428:2:4070
+2429:2:4071
+2430:2:4082
+2431:2:4083
+2432:2:4084
+2433:2:4082
+2434:2:4083
+2435:2:4084
+2436:2:4095
+2437:2:4103
+2438:0:4533
+2439:2:3127
+2440:0:4533
+2441:2:4109
+2442:2:4110
+2443:2:4114
+2444:2:4115
+2445:2:4123
+2446:2:4124
+2447:2:4128
+2448:2:4129
+2449:2:4137
+2450:2:4142
+2451:2:4146
+2452:2:4147
+2453:2:4155
+2454:2:4156
+2455:2:4160
+2456:2:4161
+2457:2:4155
+2458:2:4156
+2459:2:4160
+2460:2:4161
+2461:2:4169
+2462:2:4174
+2463:2:4175
+2464:2:4186
+2465:2:4194
+2466:2:4195
+2467:2:4199
+2468:2:4204
+2469:2:4205
+2470:2:4216
+2471:2:4217
+2472:2:4218
+2473:2:4216
+2474:2:4217
+2475:2:4218
+2476:2:4229
+2477:0:4533
+2478:2:3127
+2479:0:4533
+2480:1:2527
+2481:1:2528
+2482:1:2532
+2483:1:2533
+2484:1:2541
+2485:1:2542
+2486:1:2546
+2487:1:2547
+2488:1:2555
+2489:1:2560
+2490:1:2564
+2491:1:2565
+2492:1:2573
+2493:1:2574
+2494:1:2578
+2495:1:2579
+2496:1:2573
+2497:1:2574
+2498:1:2578
+2499:1:2579
+2500:1:2587
+2501:1:2592
+2502:1:2593
+2503:1:2604
+2504:1:2605
+2505:1:2606
+2506:1:2617
+2507:1:2622
+2508:1:2623
+2509:1:2634
+2510:1:2635
+2511:1:2636
+2512:1:2634
+2513:1:2635
+2514:1:2636
+2515:1:2647
+2516:0:4533
+2517:1:11
+2518:0:4533
+2519:2:3975
+2520:2:3976
+2521:2:3980
+2522:2:3981
+2523:2:3989
+2524:2:3990
+2525:2:3994
+2526:2:3995
+2527:2:4003
+2528:2:4008
+2529:2:4012
+2530:2:4013
+2531:2:4021
+2532:2:4022
+2533:2:4026
+2534:2:4027
+2535:2:4021
+2536:2:4022
+2537:2:4026
+2538:2:4027
+2539:2:4035
+2540:2:4040
+2541:2:4041
+2542:2:4052
+2543:2:4060
+2544:2:4061
+2545:2:4065
+2546:2:4070
+2547:2:4071
+2548:2:4082
+2549:2:4083
+2550:2:4084
+2551:2:4082
+2552:2:4083
+2553:2:4084
+2554:2:4095
+2555:2:4103
+2556:0:4533
+2557:2:3127
+2558:0:4533
+2559:2:4109
+2560:2:4110
+2561:2:4114
+2562:2:4115
+2563:2:4123
+2564:2:4124
+2565:2:4128
+2566:2:4129
+2567:2:4137
+2568:2:4142
+2569:2:4146
+2570:2:4147
+2571:2:4155
+2572:2:4156
+2573:2:4160
+2574:2:4161
+2575:2:4155
+2576:2:4156
+2577:2:4160
+2578:2:4161
+2579:2:4169
+2580:2:4174
+2581:2:4175
+2582:2:4186
+2583:2:4194
+2584:2:4195
+2585:2:4199
+2586:2:4204
+2587:2:4205
+2588:2:4216
+2589:2:4217
+2590:2:4218
+2591:2:4216
+2592:2:4217
+2593:2:4218
+2594:2:4229
+2595:0:4533
+2596:2:3127
+2597:0:4533
+2598:1:2656
+2599:0:4533
+2600:2:3975
+2601:2:3976
+2602:2:3980
+2603:2:3981
+2604:2:3989
+2605:2:3990
+2606:2:3994
+2607:2:3995
+2608:2:4003
+2609:2:4008
+2610:2:4012
+2611:2:4013
+2612:2:4021
+2613:2:4022
+2614:2:4026
+2615:2:4027
+2616:2:4021
+2617:2:4022
+2618:2:4026
+2619:2:4027
+2620:2:4035
+2621:2:4040
+2622:2:4041
+2623:2:4052
+2624:2:4060
+2625:2:4061
+2626:2:4065
+2627:2:4070
+2628:2:4071
+2629:2:4082
+2630:2:4083
+2631:2:4084
+2632:2:4082
+2633:2:4083
+2634:2:4084
+2635:2:4095
+2636:2:4103
+2637:0:4533
+2638:2:3127
+2639:0:4533
+2640:2:4109
+2641:2:4110
+2642:2:4114
+2643:2:4115
+2644:2:4123
+2645:2:4124
+2646:2:4128
+2647:2:4129
+2648:2:4137
+2649:2:4142
+2650:2:4146
+2651:2:4147
+2652:2:4155
+2653:2:4156
+2654:2:4160
+2655:2:4161
+2656:2:4155
+2657:2:4156
+2658:2:4160
+2659:2:4161
+2660:2:4169
+2661:2:4174
+2662:2:4175
+2663:2:4186
+2664:2:4194
+2665:2:4195
+2666:2:4199
+2667:2:4204
+2668:2:4205
+2669:2:4216
+2670:2:4217
+2671:2:4218
+2672:2:4216
+2673:2:4217
+2674:2:4218
+2675:2:4229
+2676:0:4533
+2677:2:3127
+2678:0:4533
+2679:1:3066
+2680:1:3073
+2681:1:3074
+2682:1:3081
+2683:1:3086
+2684:1:3093
+2685:1:3094
+2686:1:3093
+2687:1:3094
+2688:1:3101
+2689:1:3105
+2690:0:4533
+2691:2:3975
+2692:2:3976
+2693:2:3980
+2694:2:3981
+2695:2:3989
+2696:2:3990
+2697:2:3994
+2698:2:3995
+2699:2:4003
+2700:2:4008
+2701:2:4012
+2702:2:4013
+2703:2:4021
+2704:2:4022
+2705:2:4026
+2706:2:4027
+2707:2:4021
+2708:2:4022
+2709:2:4026
+2710:2:4027
+2711:2:4035
+2712:2:4040
+2713:2:4041
+2714:2:4052
+2715:2:4060
+2716:2:4061
+2717:2:4065
+2718:2:4070
+2719:2:4071
+2720:2:4082
+2721:2:4083
+2722:2:4084
+2723:2:4082
+2724:2:4083
+2725:2:4084
+2726:2:4095
+2727:2:4103
+2728:0:4533
+2729:2:3127
+2730:0:4533
+2731:2:4109
+2732:2:4110
+2733:2:4114
+2734:2:4115
+2735:2:4123
+2736:2:4124
+2737:2:4128
+2738:2:4129
+2739:2:4137
+2740:2:4142
+2741:2:4146
+2742:2:4147
+2743:2:4155
+2744:2:4156
+2745:2:4160
+2746:2:4161
+2747:2:4155
+2748:2:4156
+2749:2:4160
+2750:2:4161
+2751:2:4169
+2752:2:4174
+2753:2:4175
+2754:2:4186
+2755:2:4194
+2756:2:4195
+2757:2:4199
+2758:2:4204
+2759:2:4205
+2760:2:4216
+2761:2:4217
+2762:2:4218
+2763:2:4216
+2764:2:4217
+2765:2:4218
+2766:2:4229
+2767:0:4533
+2768:2:3127
+2769:0:4533
+2770:1:2658
+2771:1:2659
+2772:0:4533
+2773:1:11
+2774:0:4533
+2775:2:3975
+2776:2:3976
+2777:2:3980
+2778:2:3981
+2779:2:3989
+2780:2:3990
+2781:2:3994
+2782:2:3995
+2783:2:4003
+2784:2:4008
+2785:2:4012
+2786:2:4013
+2787:2:4021
+2788:2:4022
+2789:2:4026
+2790:2:4027
+2791:2:4021
+2792:2:4022
+2793:2:4026
+2794:2:4027
+2795:2:4035
+2796:2:4040
+2797:2:4041
+2798:2:4052
+2799:2:4060
+2800:2:4061
+2801:2:4065
+2802:2:4070
+2803:2:4071
+2804:2:4082
+2805:2:4083
+2806:2:4084
+2807:2:4082
+2808:2:4083
+2809:2:4084
+2810:2:4095
+2811:2:4103
+2812:0:4533
+2813:2:3127
+2814:0:4533
+2815:2:4109
+2816:2:4110
+2817:2:4114
+2818:2:4115
+2819:2:4123
+2820:2:4124
+2821:2:4128
+2822:2:4129
+2823:2:4137
+2824:2:4142
+2825:2:4146
+2826:2:4147
+2827:2:4155
+2828:2:4156
+2829:2:4160
+2830:2:4161
+2831:2:4155
+2832:2:4156
+2833:2:4160
+2834:2:4161
+2835:2:4169
+2836:2:4174
+2837:2:4175
+2838:2:4186
+2839:2:4194
+2840:2:4195
+2841:2:4199
+2842:2:4204
+2843:2:4205
+2844:2:4216
+2845:2:4217
+2846:2:4218
+2847:2:4216
+2848:2:4217
+2849:2:4218
+2850:2:4229
+2851:0:4533
+2852:2:3127
+2853:0:4533
+2854:1:2660
+2855:1:2664
+2856:1:2665
+2857:1:2669
+2858:1:2673
+2859:1:2674
+2860:1:2678
+2861:1:2686
+2862:1:2687
+2863:1:2691
+2864:1:2695
+2865:1:2696
+2866:1:2691
+2867:1:2695
+2868:1:2696
+2869:1:2700
+2870:1:2707
+2871:1:2714
+2872:1:2715
+2873:1:2722
+2874:1:2727
+2875:1:2734
+2876:1:2735
+2877:1:2734
+2878:1:2735
+2879:1:2742
+2880:0:4533
+2881:1:11
+2882:0:4533
+2883:2:3975
+2884:2:3976
+2885:2:3980
+2886:2:3981
+2887:2:3989
+2888:2:3990
+2889:2:3994
+2890:2:3995
+2891:2:4003
+2892:2:4008
+2893:2:4012
+2894:2:4013
+2895:2:4021
+2896:2:4022
+2897:2:4026
+2898:2:4027
+2899:2:4021
+2900:2:4022
+2901:2:4026
+2902:2:4027
+2903:2:4035
+2904:2:4040
+2905:2:4041
+2906:2:4052
+2907:2:4060
+2908:2:4061
+2909:2:4065
+2910:2:4070
+2911:2:4071
+2912:2:4082
+2913:2:4083
+2914:2:4084
+2915:2:4082
+2916:2:4083
+2917:2:4084
+2918:2:4095
+2919:2:4103
+2920:0:4533
+2921:2:3127
+2922:0:4533
+2923:2:4109
+2924:2:4110
+2925:2:4114
+2926:2:4115
+2927:2:4123
+2928:2:4124
+2929:2:4128
+2930:2:4129
+2931:2:4137
+2932:2:4142
+2933:2:4146
+2934:2:4147
+2935:2:4155
+2936:2:4156
+2937:2:4160
+2938:2:4161
+2939:2:4155
+2940:2:4156
+2941:2:4160
+2942:2:4161
+2943:2:4169
+2944:2:4174
+2945:2:4175
+2946:2:4186
+2947:2:4194
+2948:2:4195
+2949:2:4199
+2950:2:4204
+2951:2:4205
+2952:2:4216
+2953:2:4217
+2954:2:4218
+2955:2:4216
+2956:2:4217
+2957:2:4218
+2958:2:4229
+2959:0:4533
+2960:2:3127
+2961:0:4533
+2962:1:2752
+2963:1:2753
+2964:1:2757
+2965:1:2758
+2966:1:2766
+2967:1:2767
+2968:1:2771
+2969:1:2772
+2970:1:2780
+2971:1:2785
+2972:1:2789
+2973:1:2790
+2974:1:2798
+2975:1:2799
+2976:1:2803
+2977:1:2804
+2978:1:2798
+2979:1:2799
+2980:1:2803
+2981:1:2804
+2982:1:2812
+2983:1:2817
+2984:1:2818
+2985:1:2829
+2986:1:2830
+2987:1:2831
+2988:1:2842
+2989:1:2847
+2990:1:2848
+2991:1:2859
+2992:1:2860
+2993:1:2861
+2994:1:2859
+2995:1:2860
+2996:1:2861
+2997:1:2872
+2998:0:4533
+2999:1:11
+3000:0:4533
+3001:2:3975
+3002:2:3976
+3003:2:3980
+3004:2:3981
+3005:2:3989
+3006:2:3990
+3007:2:3994
+3008:2:3995
+3009:2:4003
+3010:2:4008
+3011:2:4012
+3012:2:4013
+3013:2:4021
+3014:2:4022
+3015:2:4026
+3016:2:4027
+3017:2:4021
+3018:2:4022
+3019:2:4026
+3020:2:4027
+3021:2:4035
+3022:2:4040
+3023:2:4041
+3024:2:4052
+3025:2:4060
+3026:2:4061
+3027:2:4065
+3028:2:4070
+3029:2:4071
+3030:2:4082
+3031:2:4083
+3032:2:4084
+3033:2:4082
+3034:2:4083
+3035:2:4084
+3036:2:4095
+3037:2:4103
+3038:0:4533
+3039:2:3127
+3040:0:4533
+3041:2:4109
+3042:2:4110
+3043:2:4114
+3044:2:4115
+3045:2:4123
+3046:2:4124
+3047:2:4128
+3048:2:4129
+3049:2:4137
+3050:2:4142
+3051:2:4146
+3052:2:4147
+3053:2:4155
+3054:2:4156
+3055:2:4160
+3056:2:4161
+3057:2:4155
+3058:2:4156
+3059:2:4160
+3060:2:4161
+3061:2:4169
+3062:2:4174
+3063:2:4175
+3064:2:4186
+3065:2:4194
+3066:2:4195
+3067:2:4199
+3068:2:4204
+3069:2:4205
+3070:2:4216
+3071:2:4217
+3072:2:4218
+3073:2:4216
+3074:2:4217
+3075:2:4218
+3076:2:4229
+3077:0:4533
+3078:2:3127
+3079:0:4533
+3080:1:2881
+3081:1:2882
+3082:1:2886
+3083:1:2887
+3084:1:2895
+3085:1:2896
+3086:1:2900
+3087:1:2901
+3088:1:2909
+3089:1:2914
+3090:1:2918
+3091:1:2919
+3092:1:2927
+3093:1:2928
+3094:1:2932
+3095:1:2933
+3096:1:2927
+3097:1:2928
+3098:1:2932
+3099:1:2933
+3100:1:2941
+3101:1:2946
+3102:1:2947
+3103:1:2958
+3104:1:2959
+3105:1:2960
+3106:1:2971
+3107:1:2976
+3108:1:2977
+3109:1:2988
+3110:1:2989
+3111:1:2990
+3112:1:2988
+3113:1:2989
+3114:1:2990
+3115:1:3001
+3116:1:3008
+3117:1:3012
+3118:0:4533
+3119:1:11
+3120:0:4533
+3121:2:3975
+3122:2:3976
+3123:2:3980
+3124:2:3981
+3125:2:3989
+3126:2:3990
+3127:2:3994
+3128:2:3995
+3129:2:4003
+3130:2:4008
+3131:2:4012
+3132:2:4013
+3133:2:4021
+3134:2:4022
+3135:2:4026
+3136:2:4027
+3137:2:4021
+3138:2:4022
+3139:2:4026
+3140:2:4027
+3141:2:4035
+3142:2:4040
+3143:2:4041
+3144:2:4052
+3145:2:4060
+3146:2:4061
+3147:2:4065
+3148:2:4070
+3149:2:4071
+3150:2:4082
+3151:2:4083
+3152:2:4084
+3153:2:4082
+3154:2:4083
+3155:2:4084
+3156:2:4095
+3157:2:4103
+3158:0:4533
+3159:2:3127
+3160:0:4533
+3161:2:4109
+3162:2:4110
+3163:2:4114
+3164:2:4115
+3165:2:4123
+3166:2:4124
+3167:2:4128
+3168:2:4129
+3169:2:4137
+3170:2:4142
+3171:2:4146
+3172:2:4147
+3173:2:4155
+3174:2:4156
+3175:2:4160
+3176:2:4161
+3177:2:4155
+3178:2:4156
+3179:2:4160
+3180:2:4161
+3181:2:4169
+3182:2:4174
+3183:2:4175
+3184:2:4186
+3185:2:4194
+3186:2:4195
+3187:2:4199
+3188:2:4204
+3189:2:4205
+3190:2:4216
+3191:2:4217
+3192:2:4218
+3193:2:4216
+3194:2:4217
+3195:2:4218
+3196:2:4229
+3197:0:4533
+3198:2:3127
+3199:0:4533
+3200:1:3013
+3201:0:4533
+3202:1:3021
+3203:0:4533
+3204:1:3109
+3205:0:4533
+3206:1:9
+3207:0:4533
+3208:2:3975
+3209:2:3976
+3210:2:3980
+3211:2:3981
+3212:2:3989
+3213:2:3990
+3214:2:3994
+3215:2:3995
+3216:2:4003
+3217:2:4008
+3218:2:4012
+3219:2:4013
+3220:2:4021
+3221:2:4022
+3222:2:4026
+3223:2:4027
+3224:2:4021
+3225:2:4022
+3226:2:4026
+3227:2:4027
+3228:2:4035
+3229:2:4040
+3230:2:4041
+3231:2:4052
+3232:2:4060
+3233:2:4061
+3234:2:4065
+3235:2:4070
+3236:2:4071
+3237:2:4082
+3238:2:4083
+3239:2:4084
+3240:2:4082
+3241:2:4083
+3242:2:4084
+3243:2:4095
+3244:2:4103
+3245:0:4533
+3246:2:3127
+3247:0:4533
+3248:2:4109
+3249:2:4110
+3250:2:4114
+3251:2:4115
+3252:2:4123
+3253:2:4124
+3254:2:4128
+3255:2:4129
+3256:2:4137
+3257:2:4142
+3258:2:4146
+3259:2:4147
+3260:2:4155
+3261:2:4156
+3262:2:4160
+3263:2:4161
+3264:2:4155
+3265:2:4156
+3266:2:4160
+3267:2:4161
+3268:2:4169
+3269:2:4174
+3270:2:4175
+3271:2:4186
+3272:2:4194
+3273:2:4195
+3274:2:4199
+3275:2:4204
+3276:2:4205
+3277:2:4216
+3278:2:4217
+3279:2:4218
+3280:2:4216
+3281:2:4217
+3282:2:4218
+3283:2:4229
+3284:0:4533
+3285:2:3127
+3286:0:4533
+3287:1:10
+3288:0:4533
+3289:1:11
+3290:0:4533
+3291:2:3975
+3292:2:3976
+3293:2:3980
+3294:2:3981
+3295:2:3989
+3296:2:3990
+3297:2:3994
+3298:2:3995
+3299:2:4003
+3300:2:4008
+3301:2:4012
+3302:2:4013
+3303:2:4021
+3304:2:4022
+3305:2:4026
+3306:2:4027
+3307:2:4021
+3308:2:4022
+3309:2:4026
+3310:2:4027
+3311:2:4035
+3312:2:4040
+3313:2:4041
+3314:2:4052
+3315:2:4060
+3316:2:4061
+3317:2:4065
+3318:2:4070
+3319:2:4071
+3320:2:4082
+3321:2:4083
+3322:2:4084
+3323:2:4082
+3324:2:4083
+3325:2:4084
+3326:2:4095
+3327:2:4103
+3328:0:4533
+3329:2:3127
+3330:0:4533
+3331:2:4109
+3332:2:4110
+3333:2:4114
+3334:2:4115
+3335:2:4123
+3336:2:4124
+3337:2:4128
+3338:2:4129
+3339:2:4137
+3340:2:4142
+3341:2:4146
+3342:2:4147
+3343:2:4155
+3344:2:4156
+3345:2:4160
+3346:2:4161
+3347:2:4155
+3348:2:4156
+3349:2:4160
+3350:2:4161
+3351:2:4169
+3352:2:4174
+3353:2:4175
+3354:2:4186
+3355:2:4194
+3356:2:4195
+3357:2:4199
+3358:2:4204
+3359:2:4205
+3360:2:4216
+3361:2:4217
+3362:2:4218
+3363:2:4216
+3364:2:4217
+3365:2:4218
+3366:2:4229
+3367:0:4533
+3368:2:3127
+3369:0:4533
+3370:1:12
+3371:1:13
+3372:1:17
+3373:1:18
+3374:1:26
+3375:1:27
+3376:1:28
+3377:1:40
+3378:1:45
+3379:1:49
+3380:1:50
+3381:1:58
+3382:1:59
+3383:1:63
+3384:1:64
+3385:1:58
+3386:1:59
+3387:1:63
+3388:1:64
+3389:1:72
+3390:1:77
+3391:1:78
+3392:1:89
+3393:1:90
+3394:1:91
+3395:1:102
+3396:1:107
+3397:1:108
+3398:1:119
+3399:1:120
+3400:1:121
+3401:1:119
+3402:1:120
+3403:1:121
+3404:1:132
+3405:0:4533
+3406:1:11
+3407:0:4533
+3408:2:3975
+3409:2:3976
+3410:2:3980
+3411:2:3981
+3412:2:3989
+3413:2:3990
+3414:2:3994
+3415:2:3995
+3416:2:4003
+3417:2:4008
+3418:2:4012
+3419:2:4013
+3420:2:4021
+3421:2:4022
+3422:2:4026
+3423:2:4027
+3424:2:4021
+3425:2:4022
+3426:2:4026
+3427:2:4027
+3428:2:4035
+3429:2:4040
+3430:2:4041
+3431:2:4052
+3432:2:4060
+3433:2:4061
+3434:2:4065
+3435:2:4070
+3436:2:4071
+3437:2:4082
+3438:2:4083
+3439:2:4084
+3440:2:4082
+3441:2:4083
+3442:2:4084
+3443:2:4095
+3444:2:4103
+3445:0:4533
+3446:2:3127
+3447:0:4533
+3448:2:4109
+3449:2:4110
+3450:2:4114
+3451:2:4115
+3452:2:4123
+3453:2:4124
+3454:2:4128
+3455:2:4129
+3456:2:4137
+3457:2:4142
+3458:2:4146
+3459:2:4147
+3460:2:4155
+3461:2:4156
+3462:2:4160
+3463:2:4161
+3464:2:4155
+3465:2:4156
+3466:2:4160
+3467:2:4161
+3468:2:4169
+3469:2:4174
+3470:2:4175
+3471:2:4186
+3472:2:4194
+3473:2:4195
+3474:2:4199
+3475:2:4204
+3476:2:4205
+3477:2:4216
+3478:2:4217
+3479:2:4218
+3480:2:4216
+3481:2:4217
+3482:2:4218
+3483:2:4229
+3484:0:4533
+3485:2:3127
+3486:0:4533
+3487:1:141
+3488:1:142
+3489:0:4533
+3490:1:11
+3491:0:4533
+3492:2:3975
+3493:2:3976
+3494:2:3980
+3495:2:3981
+3496:2:3989
+3497:2:3990
+3498:2:3994
+3499:2:3995
+3500:2:4003
+3501:2:4008
+3502:2:4012
+3503:2:4013
+3504:2:4021
+3505:2:4022
+3506:2:4026
+3507:2:4027
+3508:2:4021
+3509:2:4022
+3510:2:4026
+3511:2:4027
+3512:2:4035
+3513:2:4040
+3514:2:4041
+3515:2:4052
+3516:2:4060
+3517:2:4061
+3518:2:4065
+3519:2:4070
+3520:2:4071
+3521:2:4082
+3522:2:4083
+3523:2:4084
+3524:2:4082
+3525:2:4083
+3526:2:4084
+3527:2:4095
+3528:2:4103
+3529:0:4533
+3530:2:3127
+3531:0:4533
+3532:2:4109
+3533:2:4110
+3534:2:4114
+3535:2:4115
+3536:2:4123
+3537:2:4124
+3538:2:4128
+3539:2:4129
+3540:2:4137
+3541:2:4142
+3542:2:4146
+3543:2:4147
+3544:2:4155
+3545:2:4156
+3546:2:4160
+3547:2:4161
+3548:2:4155
+3549:2:4156
+3550:2:4160
+3551:2:4161
+3552:2:4169
+3553:2:4174
+3554:2:4175
+3555:2:4186
+3556:2:4194
+3557:2:4195
+3558:2:4199
+3559:2:4204
+3560:2:4205
+3561:2:4216
+3562:2:4217
+3563:2:4218
+3564:2:4216
+3565:2:4217
+3566:2:4218
+3567:2:4229
+3568:0:4533
+3569:2:3127
+3570:0:4533
+3571:1:148
+3572:1:149
+3573:1:153
+3574:1:154
+3575:1:162
+3576:1:163
+3577:1:167
+3578:1:168
+3579:1:176
+3580:1:181
+3581:1:185
+3582:1:186
+3583:1:194
+3584:1:195
+3585:1:199
+3586:1:200
+3587:1:194
+3588:1:195
+3589:1:199
+3590:1:200
+3591:1:208
+3592:1:213
+3593:1:214
+3594:1:225
+3595:1:226
+3596:1:227
+3597:1:238
+3598:1:243
+3599:1:244
+3600:1:255
+3601:1:256
+3602:1:257
+3603:1:255
+3604:1:256
+3605:1:257
+3606:1:268
+3607:0:4533
+3608:1:11
+3609:0:4533
+3610:2:3975
+3611:2:3976
+3612:2:3980
+3613:2:3981
+3614:2:3989
+3615:2:3990
+3616:2:3994
+3617:2:3995
+3618:2:4003
+3619:2:4008
+3620:2:4012
+3621:2:4013
+3622:2:4021
+3623:2:4022
+3624:2:4026
+3625:2:4027
+3626:2:4021
+3627:2:4022
+3628:2:4026
+3629:2:4027
+3630:2:4035
+3631:2:4040
+3632:2:4041
+3633:2:4052
+3634:2:4060
+3635:2:4061
+3636:2:4065
+3637:2:4070
+3638:2:4071
+3639:2:4082
+3640:2:4083
+3641:2:4084
+3642:2:4082
+3643:2:4083
+3644:2:4084
+3645:2:4095
+3646:2:4103
+3647:0:4533
+3648:2:3127
+3649:0:4533
+3650:2:4109
+3651:2:4110
+3652:2:4114
+3653:2:4115
+3654:2:4123
+3655:2:4124
+3656:2:4128
+3657:2:4129
+3658:2:4137
+3659:2:4142
+3660:2:4146
+3661:2:4147
+3662:2:4155
+3663:2:4156
+3664:2:4160
+3665:2:4161
+3666:2:4155
+3667:2:4156
+3668:2:4160
+3669:2:4161
+3670:2:4169
+3671:2:4174
+3672:2:4175
+3673:2:4186
+3674:2:4194
+3675:2:4195
+3676:2:4199
+3677:2:4204
+3678:2:4205
+3679:2:4216
+3680:2:4217
+3681:2:4218
+3682:2:4216
+3683:2:4217
+3684:2:4218
+3685:2:4229
+3686:0:4533
+3687:2:3127
+3688:0:4533
+3689:1:277
+3690:1:278
+3691:1:282
+3692:1:283
+3693:1:291
+3694:1:292
+3695:1:296
+3696:1:297
+3697:1:305
+3698:1:310
+3699:1:314
+3700:1:315
+3701:1:323
+3702:1:324
+3703:1:328
+3704:1:329
+3705:1:323
+3706:1:324
+3707:1:328
+3708:1:329
+3709:1:337
+3710:1:342
+3711:1:343
+3712:1:354
+3713:1:355
+3714:1:356
+3715:1:367
+3716:1:372
+3717:1:373
+3718:1:384
+3719:1:385
+3720:1:386
+3721:1:384
+3722:1:385
+3723:1:386
+3724:1:397
+3725:1:404
+3726:0:4533
+3727:1:11
+3728:0:4533
+3729:2:3975
+3730:2:3976
+3731:2:3980
+3732:2:3981
+3733:2:3989
+3734:2:3990
+3735:2:3994
+3736:2:3995
+3737:2:4003
+3738:2:4008
+3739:2:4012
+3740:2:4013
+3741:2:4021
+3742:2:4022
+3743:2:4026
+3744:2:4027
+3745:2:4021
+3746:2:4022
+3747:2:4026
+3748:2:4027
+3749:2:4035
+3750:2:4040
+3751:2:4041
+3752:2:4052
+3753:2:4060
+3754:2:4061
+3755:2:4065
+3756:2:4070
+3757:2:4071
+3758:2:4082
+3759:2:4083
+3760:2:4084
+3761:2:4082
+3762:2:4083
+3763:2:4084
+3764:2:4095
+3765:2:4103
+3766:0:4533
+3767:2:3127
+3768:0:4533
+3769:2:4109
+3770:2:4110
+3771:2:4114
+3772:2:4115
+3773:2:4123
+3774:2:4124
+3775:2:4128
+3776:2:4129
+3777:2:4137
+3778:2:4142
+3779:2:4146
+3780:2:4147
+3781:2:4155
+3782:2:4156
+3783:2:4160
+3784:2:4161
+3785:2:4155
+3786:2:4156
+3787:2:4160
+3788:2:4161
+3789:2:4169
+3790:2:4174
+3791:2:4175
+3792:2:4186
+3793:2:4194
+3794:2:4195
+3795:2:4199
+3796:2:4204
+3797:2:4205
+3798:2:4216
+3799:2:4217
+3800:2:4218
+3801:2:4216
+3802:2:4217
+3803:2:4218
+3804:2:4229
+3805:0:4533
+3806:2:3127
+3807:0:4533
+3808:1:540
+3809:1:544
+3810:1:545
+3811:1:549
+3812:1:550
+3813:1:558
+3814:1:566
+3815:1:567
+3816:1:571
+3817:1:575
+3818:1:576
+3819:1:571
+3820:1:575
+3821:1:576
+3822:1:580
+3823:1:587
+3824:1:594
+3825:1:595
+3826:1:602
+3827:1:607
+3828:1:614
+3829:1:615
+3830:1:614
+3831:1:615
+3832:1:622
+3833:0:4533
+3834:1:11
+3835:0:4533
+3836:2:3975
+3837:2:3976
+3838:2:3980
+3839:2:3981
+3840:2:3989
+3841:2:3990
+3842:2:3994
+3843:2:3995
+3844:2:4003
+3845:2:4008
+3846:2:4012
+3847:2:4013
+3848:2:4021
+3849:2:4022
+3850:2:4026
+3851:2:4027
+3852:2:4021
+3853:2:4022
+3854:2:4026
+3855:2:4027
+3856:2:4035
+3857:2:4040
+3858:2:4041
+3859:2:4052
+3860:2:4060
+3861:2:4061
+3862:2:4065
+3863:2:4070
+3864:2:4071
+3865:2:4082
+3866:2:4083
+3867:2:4084
+3868:2:4082
+3869:2:4083
+3870:2:4084
+3871:2:4095
+3872:2:4103
+3873:0:4533
+3874:2:3127
+3875:0:4533
+3876:2:4109
+3877:2:4110
+3878:2:4114
+3879:2:4115
+3880:2:4123
+3881:2:4124
+3882:2:4128
+3883:2:4129
+3884:2:4137
+3885:2:4142
+3886:2:4146
+3887:2:4147
+3888:2:4155
+3889:2:4156
+3890:2:4160
+3891:2:4161
+3892:2:4155
+3893:2:4156
+3894:2:4160
+3895:2:4161
+3896:2:4169
+3897:2:4174
+3898:2:4175
+3899:2:4186
+3900:2:4194
+3901:2:4195
+3902:2:4199
+3903:2:4204
+3904:2:4205
+3905:2:4216
+3906:2:4217
+3907:2:4218
+3908:2:4216
+3909:2:4217
+3910:2:4218
+3911:2:4229
+3912:0:4533
+3913:2:3127
+3914:0:4533
+3915:1:632
+3916:1:633
+3917:1:637
+3918:1:638
+3919:1:646
+3920:1:647
+3921:1:651
+3922:1:652
+3923:1:660
+3924:1:665
+3925:1:669
+3926:1:670
+3927:1:678
+3928:1:679
+3929:1:683
+3930:1:684
+3931:1:678
+3932:1:679
+3933:1:683
+3934:1:684
+3935:1:692
+3936:1:697
+3937:1:698
+3938:1:709
+3939:1:710
+3940:1:711
+3941:1:722
+3942:1:727
+3943:1:728
+3944:1:739
+3945:1:740
+3946:1:741
+3947:1:739
+3948:1:740
+3949:1:741
+3950:1:752
+3951:0:4533
+3952:1:11
+3953:0:4533
+3954:2:3975
+3955:2:3976
+3956:2:3980
+3957:2:3981
+3958:2:3989
+3959:2:3990
+3960:2:3994
+3961:2:3995
+3962:2:4003
+3963:2:4008
+3964:2:4012
+3965:2:4013
+3966:2:4021
+3967:2:4022
+3968:2:4026
+3969:2:4027
+3970:2:4021
+3971:2:4022
+3972:2:4026
+3973:2:4027
+3974:2:4035
+3975:2:4040
+3976:2:4041
+3977:2:4052
+3978:2:4060
+3979:2:4061
+3980:2:4065
+3981:2:4070
+3982:2:4071
+3983:2:4082
+3984:2:4083
+3985:2:4084
+3986:2:4082
+3987:2:4083
+3988:2:4084
+3989:2:4095
+3990:2:4103
+3991:0:4533
+3992:2:3127
+3993:0:4533
+3994:2:4109
+3995:2:4110
+3996:2:4114
+3997:2:4115
+3998:2:4123
+3999:2:4124
+4000:2:4128
+4001:2:4129
+4002:2:4137
+4003:2:4142
+4004:2:4146
+4005:2:4147
+4006:2:4155
+4007:2:4156
+4008:2:4160
+4009:2:4161
+4010:2:4155
+4011:2:4156
+4012:2:4160
+4013:2:4161
+4014:2:4169
+4015:2:4174
+4016:2:4175
+4017:2:4186
+4018:2:4194
+4019:2:4195
+4020:2:4199
+4021:2:4204
+4022:2:4205
+4023:2:4216
+4024:2:4217
+4025:2:4218
+4026:2:4216
+4027:2:4217
+4028:2:4218
+4029:2:4229
+4030:0:4533
+4031:2:3127
+4032:0:4533
+4033:1:761
+4034:1:764
+4035:1:765
+4036:0:4533
+4037:1:11
+4038:0:4533
+4039:2:3975
+4040:2:3976
+4041:2:3980
+4042:2:3981
+4043:2:3989
+4044:2:3990
+4045:2:3994
+4046:2:3995
+4047:2:4003
+4048:2:4008
+4049:2:4012
+4050:2:4013
+4051:2:4021
+4052:2:4022
+4053:2:4026
+4054:2:4027
+4055:2:4021
+4056:2:4022
+4057:2:4026
+4058:2:4027
+4059:2:4035
+4060:2:4040
+4061:2:4041
+4062:2:4052
+4063:2:4060
+4064:2:4061
+4065:2:4065
+4066:2:4070
+4067:2:4071
+4068:2:4082
+4069:2:4083
+4070:2:4084
+4071:2:4082
+4072:2:4083
+4073:2:4084
+4074:2:4095
+4075:2:4103
+4076:0:4533
+4077:2:3127
+4078:0:4533
+4079:2:4109
+4080:2:4110
+4081:2:4114
+4082:2:4115
+4083:2:4123
+4084:2:4124
+4085:2:4128
+4086:2:4129
+4087:2:4137
+4088:2:4142
+4089:2:4146
+4090:2:4147
+4091:2:4155
+4092:2:4156
+4093:2:4160
+4094:2:4161
+4095:2:4155
+4096:2:4156
+4097:2:4160
+4098:2:4161
+4099:2:4169
+4100:2:4174
+4101:2:4175
+4102:2:4186
+4103:2:4194
+4104:2:4195
+4105:2:4199
+4106:2:4204
+4107:2:4205
+4108:2:4216
+4109:2:4217
+4110:2:4218
+4111:2:4216
+4112:2:4217
+4113:2:4218
+4114:2:4229
+4115:0:4533
+4116:2:3127
+4117:0:4533
+4118:1:768
+4119:1:769
+4120:1:773
+4121:1:774
+4122:1:782
+4123:1:783
+4124:1:787
+4125:1:788
+4126:1:796
+4127:1:801
+4128:1:805
+4129:1:806
+4130:1:814
+4131:1:815
+4132:1:819
+4133:1:820
+4134:1:814
+4135:1:815
+4136:1:819
+4137:1:820
+4138:1:828
+4139:1:833
+4140:1:834
+4141:1:845
+4142:1:846
+4143:1:847
+4144:1:858
+4145:1:863
+4146:1:864
+4147:1:875
+4148:1:876
+4149:1:877
+4150:1:875
+4151:1:876
+4152:1:877
+4153:1:888
+4154:0:4533
+4155:1:11
+4156:0:4533
+4157:2:3975
+4158:2:3976
+4159:2:3980
+4160:2:3981
+4161:2:3989
+4162:2:3990
+4163:2:3994
+4164:2:3995
+4165:2:4003
+4166:2:4008
+4167:2:4012
+4168:2:4013
+4169:2:4021
+4170:2:4022
+4171:2:4026
+4172:2:4027
+4173:2:4021
+4174:2:4022
+4175:2:4026
+4176:2:4027
+4177:2:4035
+4178:2:4040
+4179:2:4041
+4180:2:4052
+4181:2:4060
+4182:2:4061
+4183:2:4065
+4184:2:4070
+4185:2:4071
+4186:2:4082
+4187:2:4083
+4188:2:4084
+4189:2:4082
+4190:2:4083
+4191:2:4084
+4192:2:4095
+4193:2:4103
+4194:0:4533
+4195:2:3127
+4196:0:4533
+4197:2:4109
+4198:2:4110
+4199:2:4114
+4200:2:4115
+4201:2:4123
+4202:2:4124
+4203:2:4128
+4204:2:4129
+4205:2:4137
+4206:2:4142
+4207:2:4146
+4208:2:4147
+4209:2:4155
+4210:2:4156
+4211:2:4160
+4212:2:4161
+4213:2:4155
+4214:2:4156
+4215:2:4160
+4216:2:4161
+4217:2:4169
+4218:2:4174
+4219:2:4175
+4220:2:4186
+4221:2:4194
+4222:2:4195
+4223:2:4199
+4224:2:4204
+4225:2:4205
+4226:2:4216
+4227:2:4217
+4228:2:4218
+4229:2:4216
+4230:2:4217
+4231:2:4218
+4232:2:4229
+4233:0:4533
+4234:2:3127
+4235:0:4533
+4236:1:1028
+4237:1:1029
+4238:1:1033
+4239:1:1034
+4240:1:1042
+4241:1:1043
+4242:1:1047
+4243:1:1048
+4244:1:1056
+4245:1:1061
+4246:1:1065
+4247:1:1066
+4248:1:1074
+4249:1:1075
+4250:1:1079
+4251:1:1080
+4252:1:1074
+4253:1:1075
+4254:1:1079
+4255:1:1080
+4256:1:1088
+4257:1:1093
+4258:1:1094
+4259:1:1105
+4260:1:1106
+4261:1:1107
+4262:1:1118
+4263:1:1123
+4264:1:1124
+4265:1:1135
+4266:1:1136
+4267:1:1137
+4268:1:1135
+4269:1:1136
+4270:1:1137
+4271:1:1148
+4272:1:1155
+4273:1:1159
+4274:0:4533
+4275:1:11
+4276:0:4533
+4277:2:3975
+4278:2:3976
+4279:2:3980
+4280:2:3981
+4281:2:3989
+4282:2:3990
+4283:2:3994
+4284:2:3995
+4285:2:4003
+4286:2:4008
+4287:2:4012
+4288:2:4013
+4289:2:4021
+4290:2:4022
+4291:2:4026
+4292:2:4027
+4293:2:4021
+4294:2:4022
+4295:2:4026
+4296:2:4027
+4297:2:4035
+4298:2:4040
+4299:2:4041
+4300:2:4052
+4301:2:4060
+4302:2:4061
+4303:2:4065
+4304:2:4070
+4305:2:4071
+4306:2:4082
+4307:2:4083
+4308:2:4084
+4309:2:4082
+4310:2:4083
+4311:2:4084
+4312:2:4095
+4313:2:4103
+4314:0:4533
+4315:2:3127
+4316:0:4533
+4317:2:4109
+4318:2:4110
+4319:2:4114
+4320:2:4115
+4321:2:4123
+4322:2:4124
+4323:2:4128
+4324:2:4129
+4325:2:4137
+4326:2:4142
+4327:2:4146
+4328:2:4147
+4329:2:4155
+4330:2:4156
+4331:2:4160
+4332:2:4161
+4333:2:4155
+4334:2:4156
+4335:2:4160
+4336:2:4161
+4337:2:4169
+4338:2:4174
+4339:2:4175
+4340:2:4186
+4341:2:4194
+4342:2:4195
+4343:2:4199
+4344:2:4204
+4345:2:4205
+4346:2:4216
+4347:2:4217
+4348:2:4218
+4349:2:4216
+4350:2:4217
+4351:2:4218
+4352:2:4229
+4353:0:4533
+4354:2:3127
+4355:0:4533
+4356:1:1160
+4357:1:1161
+4358:1:1165
+4359:1:1166
+4360:1:1174
+4361:1:1175
+4362:1:1176
+4363:1:1188
+4364:1:1193
+4365:1:1197
+4366:1:1198
+4367:1:1206
+4368:1:1207
+4369:1:1211
+4370:1:1212
+4371:1:1206
+4372:1:1207
+4373:1:1211
+4374:1:1212
+4375:1:1220
+4376:1:1225
+4377:1:1226
+4378:1:1237
+4379:1:1238
+4380:1:1239
+4381:1:1250
+4382:1:1255
+4383:1:1256
+4384:1:1267
+4385:1:1268
+4386:1:1269
+4387:1:1267
+4388:1:1268
+4389:1:1269
+4390:1:1280
+4391:0:4533
+4392:1:11
+4393:0:4533
+4394:2:3975
+4395:2:3976
+4396:2:3980
+4397:2:3981
+4398:2:3989
+4399:2:3990
+4400:2:3994
+4401:2:3995
+4402:2:4003
+4403:2:4008
+4404:2:4012
+4405:2:4013
+4406:2:4021
+4407:2:4022
+4408:2:4026
+4409:2:4027
+4410:2:4021
+4411:2:4022
+4412:2:4026
+4413:2:4027
+4414:2:4035
+4415:2:4040
+4416:2:4041
+4417:2:4052
+4418:2:4060
+4419:2:4061
+4420:2:4065
+4421:2:4070
+4422:2:4071
+4423:2:4082
+4424:2:4083
+4425:2:4084
+4426:2:4082
+4427:2:4083
+4428:2:4084
+4429:2:4095
+4430:2:4103
+4431:0:4533
+4432:2:3127
+4433:0:4533
+4434:2:4109
+4435:2:4110
+4436:2:4114
+4437:2:4115
+4438:2:4123
+4439:2:4124
+4440:2:4128
+4441:2:4129
+4442:2:4137
+4443:2:4142
+4444:2:4146
+4445:2:4147
+4446:2:4155
+4447:2:4156
+4448:2:4160
+4449:2:4161
+4450:2:4155
+4451:2:4156
+4452:2:4160
+4453:2:4161
+4454:2:4169
+4455:2:4174
+4456:2:4175
+4457:2:4186
+4458:2:4194
+4459:2:4195
+4460:2:4199
+4461:2:4204
+4462:2:4205
+4463:2:4216
+4464:2:4217
+4465:2:4218
+4466:2:4216
+4467:2:4217
+4468:2:4218
+4469:2:4229
+4470:0:4533
+4471:2:3127
+4472:0:4533
+4473:1:1289
+4474:0:4533
+4475:2:3975
+4476:2:3976
+4477:2:3980
+4478:2:3981
+4479:2:3989
+4480:2:3990
+4481:2:3994
+4482:2:3995
+4483:2:4003
+4484:2:4008
+4485:2:4012
+4486:2:4013
+4487:2:4021
+4488:2:4022
+4489:2:4026
+4490:2:4027
+4491:2:4021
+4492:2:4022
+4493:2:4026
+4494:2:4027
+4495:2:4035
+4496:2:4040
+4497:2:4041
+4498:2:4052
+4499:2:4060
+4500:2:4061
+4501:2:4065
+4502:2:4070
+4503:2:4071
+4504:2:4082
+4505:2:4083
+4506:2:4084
+4507:2:4082
+4508:2:4083
+4509:2:4084
+4510:2:4095
+4511:2:4103
+4512:0:4533
+4513:2:3127
+4514:0:4533
+4515:2:4109
+4516:2:4110
+4517:2:4114
+4518:2:4115
+4519:2:4123
+4520:2:4124
+4521:2:4128
+4522:2:4129
+4523:2:4137
+4524:2:4142
+4525:2:4146
+4526:2:4147
+4527:2:4155
+4528:2:4156
+4529:2:4160
+4530:2:4161
+4531:2:4155
+4532:2:4156
+4533:2:4160
+4534:2:4161
+4535:2:4169
+4536:2:4174
+4537:2:4175
+4538:2:4186
+4539:2:4194
+4540:2:4195
+4541:2:4199
+4542:2:4204
+4543:2:4205
+4544:2:4216
+4545:2:4217
+4546:2:4218
+4547:2:4216
+4548:2:4217
+4549:2:4218
+4550:2:4229
+4551:0:4533
+4552:2:3127
+4553:0:4533
+4554:1:3023
+4555:1:3030
+4556:1:3031
+4557:1:3038
+4558:1:3043
+4559:1:3050
+4560:1:3051
+4561:1:3050
+4562:1:3051
+4563:1:3058
+4564:1:3062
+4565:0:4533
+4566:2:3975
+4567:2:3976
+4568:2:3980
+4569:2:3981
+4570:2:3989
+4571:2:3990
+4572:2:3994
+4573:2:3995
+4574:2:4003
+4575:2:4008
+4576:2:4012
+4577:2:4013
+4578:2:4021
+4579:2:4022
+4580:2:4026
+4581:2:4027
+4582:2:4021
+4583:2:4022
+4584:2:4026
+4585:2:4027
+4586:2:4035
+4587:2:4040
+4588:2:4041
+4589:2:4052
+4590:2:4060
+4591:2:4061
+4592:2:4065
+4593:2:4070
+4594:2:4071
+4595:2:4082
+4596:2:4083
+4597:2:4084
+4598:2:4082
+4599:2:4083
+4600:2:4084
+4601:2:4095
+4602:2:4103
+4603:0:4533
+4604:2:3127
+4605:0:4533
+4606:2:4109
+4607:2:4110
+4608:2:4114
+4609:2:4115
+4610:2:4123
+4611:2:4124
+4612:2:4128
+4613:2:4129
+4614:2:4137
+4615:2:4142
+4616:2:4146
+4617:2:4147
+4618:2:4155
+4619:2:4156
+4620:2:4160
+4621:2:4161
+4622:2:4155
+4623:2:4156
+4624:2:4160
+4625:2:4161
+4626:2:4169
+4627:2:4174
+4628:2:4175
+4629:2:4186
+4630:2:4194
+4631:2:4195
+4632:2:4199
+4633:2:4204
+4634:2:4205
+4635:2:4216
+4636:2:4217
+4637:2:4218
+4638:2:4216
+4639:2:4217
+4640:2:4218
+4641:2:4229
+4642:0:4533
+4643:2:3127
+4644:0:4533
+4645:1:1291
+4646:1:1292
+4647:0:4533
+4648:1:11
+4649:0:4533
+4650:2:3975
+4651:2:3976
+4652:2:3980
+4653:2:3981
+4654:2:3989
+4655:2:3990
+4656:2:3994
+4657:2:3995
+4658:2:4003
+4659:2:4008
+4660:2:4012
+4661:2:4013
+4662:2:4021
+4663:2:4022
+4664:2:4026
+4665:2:4027
+4666:2:4021
+4667:2:4022
+4668:2:4026
+4669:2:4027
+4670:2:4035
+4671:2:4040
+4672:2:4041
+4673:2:4052
+4674:2:4060
+4675:2:4061
+4676:2:4065
+4677:2:4070
+4678:2:4071
+4679:2:4082
+4680:2:4083
+4681:2:4084
+4682:2:4082
+4683:2:4083
+4684:2:4084
+4685:2:4095
+4686:2:4103
+4687:0:4533
+4688:2:3127
+4689:0:4533
+4690:2:4109
+4691:2:4110
+4692:2:4114
+4693:2:4115
+4694:2:4123
+4695:2:4124
+4696:2:4128
+4697:2:4129
+4698:2:4137
+4699:2:4142
+4700:2:4146
+4701:2:4147
+4702:2:4155
+4703:2:4156
+4704:2:4160
+4705:2:4161
+4706:2:4155
+4707:2:4156
+4708:2:4160
+4709:2:4161
+4710:2:4169
+4711:2:4174
+4712:2:4175
+4713:2:4186
+4714:2:4194
+4715:2:4195
+4716:2:4199
+4717:2:4204
+4718:2:4205
+4719:2:4216
+4720:2:4217
+4721:2:4218
+4722:2:4216
+4723:2:4217
+4724:2:4218
+4725:2:4229
+4726:0:4533
+4727:2:3127
+4728:0:4533
+4729:1:1293
+4730:1:1294
+4731:1:1298
+4732:1:1299
+4733:1:1307
+4734:1:1308
+4735:1:1312
+4736:1:1313
+4737:1:1321
+4738:1:1326
+4739:1:1330
+4740:1:1331
+4741:1:1339
+4742:1:1340
+4743:1:1344
+4744:1:1345
+4745:1:1339
+4746:1:1340
+4747:1:1344
+4748:1:1345
+4749:1:1353
+4750:1:1358
+4751:1:1359
+4752:1:1370
+4753:1:1371
+4754:1:1372
+4755:1:1383
+4756:1:1388
+4757:1:1389
+4758:1:1400
+4759:1:1401
+4760:1:1402
+4761:1:1400
+4762:1:1401
+4763:1:1402
+4764:1:1413
+4765:0:4533
+4766:1:11
+4767:0:4533
+4768:2:3975
+4769:2:3976
+4770:2:3980
+4771:2:3981
+4772:2:3989
+4773:2:3990
+4774:2:3994
+4775:2:3995
+4776:2:4003
+4777:2:4008
+4778:2:4012
+4779:2:4013
+4780:2:4021
+4781:2:4022
+4782:2:4026
+4783:2:4027
+4784:2:4021
+4785:2:4022
+4786:2:4026
+4787:2:4027
+4788:2:4035
+4789:2:4040
+4790:2:4041
+4791:2:4052
+4792:2:4060
+4793:2:4061
+4794:2:4065
+4795:2:4070
+4796:2:4071
+4797:2:4082
+4798:2:4083
+4799:2:4084
+4800:2:4082
+4801:2:4083
+4802:2:4084
+4803:2:4095
+4804:2:4103
+4805:0:4533
+4806:2:3127
+4807:0:4533
+4808:2:4109
+4809:2:4110
+4810:2:4114
+4811:2:4115
+4812:2:4123
+4813:2:4124
+4814:2:4128
+4815:2:4129
+4816:2:4137
+4817:2:4142
+4818:2:4146
+4819:2:4147
+4820:2:4155
+4821:2:4156
+4822:2:4160
+4823:2:4161
+4824:2:4155
+4825:2:4156
+4826:2:4160
+4827:2:4161
+4828:2:4169
+4829:2:4174
+4830:2:4175
+4831:2:4186
+4832:2:4194
+4833:2:4195
+4834:2:4199
+4835:2:4204
+4836:2:4205
+4837:2:4216
+4838:2:4217
+4839:2:4218
+4840:2:4216
+4841:2:4217
+4842:2:4218
+4843:2:4229
+4844:0:4533
+4845:2:3127
+4846:0:4533
+4847:1:1422
+4848:1:1423
+4849:1:1427
+4850:1:1428
+4851:1:1436
+4852:1:1437
+4853:1:1441
+4854:1:1442
+4855:1:1450
+4856:1:1455
+4857:1:1459
+4858:1:1460
+4859:1:1468
+4860:1:1469
+4861:1:1473
+4862:1:1474
+4863:1:1468
+4864:1:1469
+4865:1:1473
+4866:1:1474
+4867:1:1482
+4868:1:1487
+4869:1:1488
+4870:1:1499
+4871:1:1500
+4872:1:1501
+4873:1:1512
+4874:1:1517
+4875:1:1518
+4876:1:1529
+4877:1:1530
+4878:1:1531
+4879:1:1529
+4880:1:1530
+4881:1:1531
+4882:1:1542
+4883:1:1549
+4884:1:1553
+4885:0:4533
+4886:1:11
+4887:0:4533
+4888:2:3975
+4889:2:3976
+4890:2:3980
+4891:2:3981
+4892:2:3989
+4893:2:3990
+4894:2:3994
+4895:2:3995
+4896:2:4003
+4897:2:4008
+4898:2:4012
+4899:2:4013
+4900:2:4021
+4901:2:4022
+4902:2:4026
+4903:2:4027
+4904:2:4021
+4905:2:4022
+4906:2:4026
+4907:2:4027
+4908:2:4035
+4909:2:4040
+4910:2:4041
+4911:2:4052
+4912:2:4060
+4913:2:4061
+4914:2:4065
+4915:2:4070
+4916:2:4071
+4917:2:4082
+4918:2:4083
+4919:2:4084
+4920:2:4082
+4921:2:4083
+4922:2:4084
+4923:2:4095
+4924:2:4103
+4925:0:4533
+4926:2:3127
+4927:0:4533
+4928:2:4109
+4929:2:4110
+4930:2:4114
+4931:2:4115
+4932:2:4123
+4933:2:4124
+4934:2:4128
+4935:2:4129
+4936:2:4137
+4937:2:4142
+4938:2:4146
+4939:2:4147
+4940:2:4155
+4941:2:4156
+4942:2:4160
+4943:2:4161
+4944:2:4155
+4945:2:4156
+4946:2:4160
+4947:2:4161
+4948:2:4169
+4949:2:4174
+4950:2:4175
+4951:2:4186
+4952:2:4194
+4953:2:4195
+4954:2:4199
+4955:2:4204
+4956:2:4205
+4957:2:4216
+4958:2:4217
+4959:2:4218
+4960:2:4216
+4961:2:4217
+4962:2:4218
+4963:2:4229
+4964:0:4533
+4965:2:3127
+4966:0:4533
+4967:1:1554
+4968:1:1558
+4969:1:1559
+4970:1:1563
+4971:1:1564
+4972:1:1572
+4973:1:1580
+4974:1:1581
+4975:1:1585
+4976:1:1589
+4977:1:1590
+4978:1:1585
+4979:1:1589
+4980:1:1590
+4981:1:1594
+4982:1:1601
+4983:1:1608
+4984:1:1609
+4985:1:1616
+4986:1:1621
+4987:1:1628
+4988:1:1629
+4989:1:1628
+4990:1:1629
+4991:1:1636
+4992:0:4533
+4993:1:11
+4994:0:4533
+4995:2:3975
+4996:2:3976
+4997:2:3980
+4998:2:3981
+4999:2:3989
+5000:2:3990
+5001:2:3994
+5002:2:3995
+5003:2:4003
+5004:2:4008
+5005:2:4012
+5006:2:4013
+5007:2:4021
+5008:2:4022
+5009:2:4026
+5010:2:4027
+5011:2:4021
+5012:2:4022
+5013:2:4026
+5014:2:4027
+5015:2:4035
+5016:2:4040
+5017:2:4041
+5018:2:4052
+5019:2:4060
+5020:2:4061
+5021:2:4065
+5022:2:4070
+5023:2:4071
+5024:2:4082
+5025:2:4083
+5026:2:4084
+5027:2:4082
+5028:2:4083
+5029:2:4084
+5030:2:4095
+5031:2:4103
+5032:0:4533
+5033:2:3127
+5034:0:4533
+5035:2:4109
+5036:2:4110
+5037:2:4114
+5038:2:4115
+5039:2:4123
+5040:2:4124
+5041:2:4128
+5042:2:4129
+5043:2:4137
+5044:2:4142
+5045:2:4146
+5046:2:4147
+5047:2:4155
+5048:2:4156
+5049:2:4160
+5050:2:4161
+5051:2:4155
+5052:2:4156
+5053:2:4160
+5054:2:4161
+5055:2:4169
+5056:2:4174
+5057:2:4175
+5058:2:4186
+5059:2:4194
+5060:2:4195
+5061:2:4199
+5062:2:4204
+5063:2:4205
+5064:2:4216
+5065:2:4217
+5066:2:4218
+5067:2:4216
+5068:2:4217
+5069:2:4218
+5070:2:4229
+5071:0:4533
+5072:2:3127
+5073:0:4533
+5074:1:1646
+5075:1:1647
+5076:1:1651
+5077:1:1652
+5078:1:1660
+5079:1:1661
+5080:1:1665
+5081:1:1666
+5082:1:1674
+5083:1:1679
+5084:1:1683
+5085:1:1684
+5086:1:1692
+5087:1:1693
+5088:1:1697
+5089:1:1698
+5090:1:1692
+5091:1:1693
+5092:1:1697
+5093:1:1698
+5094:1:1706
+5095:1:1711
+5096:1:1712
+5097:1:1723
+5098:1:1724
+5099:1:1725
+5100:1:1736
+5101:1:1741
+5102:1:1742
+5103:1:1753
+5104:1:1754
+5105:1:1755
+5106:1:1753
+5107:1:1754
+5108:1:1755
+5109:1:1766
+5110:0:4533
+5111:1:11
+5112:0:4533
+5113:2:3975
+5114:2:3976
+5115:2:3980
+5116:2:3981
+5117:2:3989
+5118:2:3990
+5119:2:3994
+5120:2:3995
+5121:2:4003
+5122:2:4008
+5123:2:4012
+5124:2:4013
+5125:2:4021
+5126:2:4022
+5127:2:4026
+5128:2:4027
+5129:2:4021
+5130:2:4022
+5131:2:4026
+5132:2:4027
+5133:2:4035
+5134:2:4040
+5135:2:4041
+5136:2:4052
+5137:2:4060
+5138:2:4061
+5139:2:4065
+5140:2:4070
+5141:2:4071
+5142:2:4082
+5143:2:4083
+5144:2:4084
+5145:2:4082
+5146:2:4083
+5147:2:4084
+5148:2:4095
+5149:2:4103
+5150:0:4533
+5151:2:3127
+5152:0:4533
+5153:2:4109
+5154:2:4110
+5155:2:4114
+5156:2:4115
+5157:2:4123
+5158:2:4124
+5159:2:4128
+5160:2:4129
+5161:2:4137
+5162:2:4142
+5163:2:4146
+5164:2:4147
+5165:2:4155
+5166:2:4156
+5167:2:4160
+5168:2:4161
+5169:2:4155
+5170:2:4156
+5171:2:4160
+5172:2:4161
+5173:2:4169
+5174:2:4174
+5175:2:4175
+5176:2:4186
+5177:2:4194
+5178:2:4195
+5179:2:4199
+5180:2:4204
+5181:2:4205
+5182:2:4216
+5183:2:4217
+5184:2:4218
+5185:2:4216
+5186:2:4217
+5187:2:4218
+5188:2:4229
+5189:0:4533
+5190:2:3127
+5191:0:4533
+5192:1:1775
+5193:1:1776
+5194:1:1780
+5195:1:1781
+5196:1:1789
+5197:1:1790
+5198:1:1794
+5199:1:1795
+5200:1:1803
+5201:1:1808
+5202:1:1812
+5203:1:1813
+5204:1:1821
+5205:1:1822
+5206:1:1826
+5207:1:1827
+5208:1:1821
+5209:1:1822
+5210:1:1826
+5211:1:1827
+5212:1:1835
+5213:1:1840
+5214:1:1841
+5215:1:1852
+5216:1:1853
+5217:1:1854
+5218:1:1865
+5219:1:1870
+5220:1:1871
+5221:1:1882
+5222:1:1883
+5223:1:1884
+5224:1:1882
+5225:1:1883
+5226:1:1884
+5227:1:1895
+5228:1:1902
+5229:1:1906
+5230:0:4533
+5231:1:11
+5232:0:4533
+5233:2:3975
+5234:2:3976
+5235:2:3980
+5236:2:3981
+5237:2:3989
+5238:2:3990
+5239:2:3994
+5240:2:3995
+5241:2:4003
+5242:2:4008
+5243:2:4012
+5244:2:4013
+5245:2:4021
+5246:2:4022
+5247:2:4026
+5248:2:4027
+5249:2:4021
+5250:2:4022
+5251:2:4026
+5252:2:4027
+5253:2:4035
+5254:2:4040
+5255:2:4041
+5256:2:4052
+5257:2:4060
+5258:2:4061
+5259:2:4065
+5260:2:4070
+5261:2:4071
+5262:2:4082
+5263:2:4083
+5264:2:4084
+5265:2:4082
+5266:2:4083
+5267:2:4084
+5268:2:4095
+5269:2:4103
+5270:0:4533
+5271:2:3127
+5272:0:4533
+5273:2:4109
+5274:2:4110
+5275:2:4114
+5276:2:4115
+5277:2:4123
+5278:2:4124
+5279:2:4128
+5280:2:4129
+5281:2:4137
+5282:2:4142
+5283:2:4146
+5284:2:4147
+5285:2:4155
+5286:2:4156
+5287:2:4160
+5288:2:4161
+5289:2:4155
+5290:2:4156
+5291:2:4160
+5292:2:4161
+5293:2:4169
+5294:2:4174
+5295:2:4175
+5296:2:4186
+5297:2:4194
+5298:2:4195
+5299:2:4199
+5300:2:4204
+5301:2:4205
+5302:2:4216
+5303:2:4217
+5304:2:4218
+5305:2:4216
+5306:2:4217
+5307:2:4218
+5308:2:4229
+5309:0:4533
+5310:2:3127
+5311:0:4533
+5312:1:1907
+5313:1:1908
+5314:1:1912
+5315:1:1913
+5316:1:1921
+5317:1:1922
+5318:1:1923
+5319:1:1935
+5320:1:1940
+5321:1:1944
+5322:1:1945
+5323:1:1953
+5324:1:1954
+5325:1:1958
+5326:1:1959
+5327:1:1953
+5328:1:1954
+5329:1:1958
+5330:1:1959
+5331:1:1967
+5332:1:1972
+5333:1:1973
+5334:1:1984
+5335:1:1985
+5336:1:1986
+5337:1:1997
+5338:1:2002
+5339:1:2003
+5340:1:2014
+5341:1:2015
+5342:1:2016
+5343:1:2014
+5344:1:2015
+5345:1:2016
+5346:1:2027
+5347:0:4533
+5348:1:11
+5349:0:4533
+5350:2:3975
+5351:2:3976
+5352:2:3980
+5353:2:3981
+5354:2:3989
+5355:2:3990
+5356:2:3994
+5357:2:3995
+5358:2:4003
+5359:2:4008
+5360:2:4012
+5361:2:4013
+5362:2:4021
+5363:2:4022
+5364:2:4026
+5365:2:4027
+5366:2:4021
+5367:2:4022
+5368:2:4026
+5369:2:4027
+5370:2:4035
+5371:2:4040
+5372:2:4041
+5373:2:4052
+5374:2:4060
+5375:2:4061
+5376:2:4065
+5377:2:4070
+5378:2:4071
+5379:2:4082
+5380:2:4083
+5381:2:4084
+5382:2:4082
+5383:2:4083
+5384:2:4084
+5385:2:4095
+5386:2:4103
+5387:0:4533
+5388:2:3127
+5389:0:4533
+5390:2:4109
+5391:2:4110
+5392:2:4114
+5393:2:4115
+5394:2:4123
+5395:2:4124
+5396:2:4128
+5397:2:4129
+5398:2:4137
+5399:2:4142
+5400:2:4146
+5401:2:4147
+5402:2:4155
+5403:2:4156
+5404:2:4160
+5405:2:4161
+5406:2:4155
+5407:2:4156
+5408:2:4160
+5409:2:4161
+5410:2:4169
+5411:2:4174
+5412:2:4175
+5413:2:4186
+5414:2:4194
+5415:2:4195
+5416:2:4199
+5417:2:4204
+5418:2:4205
+5419:2:4216
+5420:2:4217
+5421:2:4218
+5422:2:4216
+5423:2:4217
+5424:2:4218
+5425:2:4229
+5426:0:4533
+5427:2:3127
+5428:0:4533
+5429:1:2036
+5430:1:2037
+5431:0:4533
+5432:1:11
+5433:0:4533
+5434:2:3975
+5435:2:3976
+5436:2:3980
+5437:2:3981
+5438:2:3989
+5439:2:3990
+5440:2:3994
+5441:2:3995
+5442:2:4003
+5443:2:4008
+5444:2:4012
+5445:2:4013
+5446:2:4021
+5447:2:4022
+5448:2:4026
+5449:2:4027
+5450:2:4021
+5451:2:4022
+5452:2:4026
+5453:2:4027
+5454:2:4035
+5455:2:4040
+5456:2:4041
+5457:2:4052
+5458:2:4060
+5459:2:4061
+5460:2:4065
+5461:2:4070
+5462:2:4071
+5463:2:4082
+5464:2:4083
+5465:2:4084
+5466:2:4082
+5467:2:4083
+5468:2:4084
+5469:2:4095
+5470:2:4103
+5471:0:4533
+5472:2:3127
+5473:0:4533
+5474:2:4109
+5475:2:4110
+5476:2:4114
+5477:2:4115
+5478:2:4123
+5479:2:4124
+5480:2:4128
+5481:2:4129
+5482:2:4137
+5483:2:4142
+5484:2:4146
+5485:2:4147
+5486:2:4155
+5487:2:4156
+5488:2:4160
+5489:2:4161
+5490:2:4155
+5491:2:4156
+5492:2:4160
+5493:2:4161
+5494:2:4169
+5495:2:4174
+5496:2:4175
+5497:2:4186
+5498:2:4194
+5499:2:4195
+5500:2:4199
+5501:2:4204
+5502:2:4205
+5503:2:4216
+5504:2:4217
+5505:2:4218
+5506:2:4216
+5507:2:4217
+5508:2:4218
+5509:2:4229
+5510:0:4533
+5511:2:3127
+5512:0:4533
+5513:1:2043
+5514:1:2044
+5515:1:2048
+5516:1:2049
+5517:1:2057
+5518:1:2058
+5519:1:2062
+5520:1:2063
+5521:1:2071
+5522:1:2076
+5523:1:2080
+5524:1:2081
+5525:1:2089
+5526:1:2090
+5527:1:2094
+5528:1:2095
+5529:1:2089
+5530:1:2090
+5531:1:2094
+5532:1:2095
+5533:1:2103
+5534:1:2108
+5535:1:2109
+5536:1:2120
+5537:1:2121
+5538:1:2122
+5539:1:2133
+5540:1:2138
+5541:1:2139
+5542:1:2150
+5543:1:2151
+5544:1:2152
+5545:1:2150
+5546:1:2151
+5547:1:2152
+5548:1:2163
+5549:0:4533
+5550:1:11
+5551:0:4533
+5552:2:3975
+5553:2:3976
+5554:2:3980
+5555:2:3981
+5556:2:3989
+5557:2:3990
+5558:2:3994
+5559:2:3995
+5560:2:4003
+5561:2:4008
+5562:2:4012
+5563:2:4013
+5564:2:4021
+5565:2:4022
+5566:2:4026
+5567:2:4027
+5568:2:4021
+5569:2:4022
+5570:2:4026
+5571:2:4027
+5572:2:4035
+5573:2:4040
+5574:2:4041
+5575:2:4052
+5576:2:4060
+5577:2:4061
+5578:2:4065
+5579:2:4070
+5580:2:4071
+5581:2:4082
+5582:2:4083
+5583:2:4084
+5584:2:4082
+5585:2:4083
+5586:2:4084
+5587:2:4095
+5588:2:4103
+5589:0:4533
+5590:2:3127
+5591:0:4533
+5592:2:4109
+5593:2:4110
+5594:2:4114
+5595:2:4115
+5596:2:4123
+5597:2:4124
+5598:2:4128
+5599:2:4129
+5600:2:4137
+5601:2:4142
+5602:2:4146
+5603:2:4147
+5604:2:4155
+5605:2:4156
+5606:2:4160
+5607:2:4161
+5608:2:4155
+5609:2:4156
+5610:2:4160
+5611:2:4161
+5612:2:4169
+5613:2:4174
+5614:2:4175
+5615:2:4186
+5616:2:4194
+5617:2:4195
+5618:2:4199
+5619:2:4204
+5620:2:4205
+5621:2:4216
+5622:2:4217
+5623:2:4218
+5624:2:4216
+5625:2:4217
+5626:2:4218
+5627:2:4229
+5628:0:4533
+5629:2:3127
+5630:0:4533
+5631:1:2172
+5632:1:2173
+5633:1:2177
+5634:1:2178
+5635:1:2186
+5636:1:2187
+5637:1:2191
+5638:1:2192
+5639:1:2200
+5640:1:2205
+5641:1:2209
+5642:1:2210
+5643:1:2218
+5644:1:2219
+5645:1:2223
+5646:1:2224
+5647:1:2218
+5648:1:2219
+5649:1:2223
+5650:1:2224
+5651:1:2232
+5652:1:2237
+5653:1:2238
+5654:1:2249
+5655:1:2250
+5656:1:2251
+5657:1:2262
+5658:1:2267
+5659:1:2268
+5660:1:2279
+5661:1:2280
+5662:1:2281
+5663:1:2279
+5664:1:2280
+5665:1:2281
+5666:1:2292
+5667:1:2299
+5668:0:4533
+5669:1:11
+5670:0:4533
+5671:2:3975
+5672:2:3976
+5673:2:3980
+5674:2:3981
+5675:2:3989
+5676:2:3990
+5677:2:3994
+5678:2:3995
+5679:2:4003
+5680:2:4008
+5681:2:4012
+5682:2:4013
+5683:2:4021
+5684:2:4022
+5685:2:4026
+5686:2:4027
+5687:2:4021
+5688:2:4022
+5689:2:4026
+5690:2:4027
+5691:2:4035
+5692:2:4040
+5693:2:4041
+5694:2:4052
+5695:2:4060
+5696:2:4061
+5697:2:4065
+5698:2:4070
+5699:2:4071
+5700:2:4082
+5701:2:4083
+5702:2:4084
+5703:2:4082
+5704:2:4083
+5705:2:4084
+5706:2:4095
+5707:2:4103
+5708:0:4533
+5709:2:3127
+5710:0:4533
+5711:2:4109
+5712:2:4110
+5713:2:4114
+5714:2:4115
+5715:2:4123
+5716:2:4124
+5717:2:4128
+5718:2:4129
+5719:2:4137
+5720:2:4142
+5721:2:4146
+5722:2:4147
+5723:2:4155
+5724:2:4156
+5725:2:4160
+5726:2:4161
+5727:2:4155
+5728:2:4156
+5729:2:4160
+5730:2:4161
+5731:2:4169
+5732:2:4174
+5733:2:4175
+5734:2:4186
+5735:2:4194
+5736:2:4195
+5737:2:4199
+5738:2:4204
+5739:2:4205
+5740:2:4216
+5741:2:4217
+5742:2:4218
+5743:2:4216
+5744:2:4217
+5745:2:4218
+5746:2:4229
+5747:0:4533
+5748:2:3127
+5749:0:4533
+5750:1:2435
+5751:1:2439
+5752:1:2440
+5753:1:2444
+5754:1:2445
+5755:1:2453
+5756:1:2461
+5757:1:2462
+5758:1:2466
+5759:1:2470
+5760:1:2471
+5761:1:2466
+5762:1:2470
+5763:1:2471
+5764:1:2475
+5765:1:2482
+5766:1:2489
+5767:1:2490
+5768:1:2497
+5769:1:2502
+5770:1:2509
+5771:1:2510
+5772:1:2509
+5773:1:2510
+5774:1:2517
+5775:0:4533
+5776:1:11
+5777:0:4533
+5778:2:3975
+5779:2:3976
+5780:2:3980
+5781:2:3981
+5782:2:3989
+5783:2:3990
+5784:2:3994
+5785:2:3995
+5786:2:4003
+5787:2:4008
+5788:2:4012
+5789:2:4013
+5790:2:4021
+5791:2:4022
+5792:2:4026
+5793:2:4027
+5794:2:4021
+5795:2:4022
+5796:2:4026
+5797:2:4027
+5798:2:4035
+5799:2:4040
+5800:2:4041
+5801:2:4052
+5802:2:4060
+5803:2:4061
+5804:2:4065
+5805:2:4070
+5806:2:4071
+5807:2:4082
+5808:2:4083
+5809:2:4084
+5810:2:4082
+5811:2:4083
+5812:2:4084
+5813:2:4095
+5814:2:4103
+5815:0:4533
+5816:2:3127
+5817:0:4533
+5818:2:4109
+5819:2:4110
+5820:2:4114
+5821:2:4115
+5822:2:4123
+5823:2:4124
+5824:2:4128
+5825:2:4129
+5826:2:4137
+5827:2:4142
+5828:2:4146
+5829:2:4147
+5830:2:4155
+5831:2:4156
+5832:2:4160
+5833:2:4161
+5834:2:4155
+5835:2:4156
+5836:2:4160
+5837:2:4161
+5838:2:4169
+5839:2:4174
+5840:2:4175
+5841:2:4186
+5842:2:4194
+5843:2:4195
+5844:2:4199
+5845:2:4204
+5846:2:4205
+5847:2:4216
+5848:2:4217
+5849:2:4218
+5850:2:4216
+5851:2:4217
+5852:2:4218
+5853:2:4229
+5854:0:4533
+5855:2:3127
+5856:0:4533
+5857:1:2527
+5858:1:2528
+5859:1:2532
+5860:1:2533
+5861:1:2541
+5862:1:2542
+5863:1:2546
+5864:1:2547
+5865:1:2555
+5866:1:2560
+5867:1:2564
+5868:1:2565
+5869:1:2573
+5870:1:2574
+5871:1:2578
+5872:1:2579
+5873:1:2573
+5874:1:2574
+5875:1:2578
+5876:1:2579
+5877:1:2587
+5878:1:2592
+5879:1:2593
+5880:1:2604
+5881:1:2605
+5882:1:2606
+5883:1:2617
+5884:1:2622
+5885:1:2623
+5886:1:2634
+5887:1:2635
+5888:1:2636
+5889:1:2634
+5890:1:2635
+5891:1:2636
+5892:1:2647
+5893:0:4533
+5894:1:11
+5895:0:4533
+5896:2:3975
+5897:2:3976
+5898:2:3980
+5899:2:3981
+5900:2:3989
+5901:2:3990
+5902:2:3994
+5903:2:3995
+5904:2:4003
+5905:2:4008
+5906:2:4012
+5907:2:4013
+5908:2:4021
+5909:2:4022
+5910:2:4026
+5911:2:4027
+5912:2:4021
+5913:2:4022
+5914:2:4026
+5915:2:4027
+5916:2:4035
+5917:2:4040
+5918:2:4041
+5919:2:4052
+5920:2:4060
+5921:2:4061
+5922:2:4065
+5923:2:4070
+5924:2:4071
+5925:2:4082
+5926:2:4083
+5927:2:4084
+5928:2:4082
+5929:2:4083
+5930:2:4084
+5931:2:4095
+5932:2:4103
+5933:0:4533
+5934:2:3127
+5935:0:4533
+5936:2:4109
+5937:2:4110
+5938:2:4114
+5939:2:4115
+5940:2:4123
+5941:2:4124
+5942:2:4128
+5943:2:4129
+5944:2:4137
+5945:2:4142
+5946:2:4146
+5947:2:4147
+5948:2:4155
+5949:2:4156
+5950:2:4160
+5951:2:4161
+5952:2:4155
+5953:2:4156
+5954:2:4160
+5955:2:4161
+5956:2:4169
+5957:2:4174
+5958:2:4175
+5959:2:4186
+5960:2:4194
+5961:2:4195
+5962:2:4199
+5963:2:4204
+5964:2:4205
+5965:2:4216
+5966:2:4217
+5967:2:4218
+5968:2:4216
+5969:2:4217
+5970:2:4218
+5971:2:4229
+5972:0:4533
+5973:2:3127
+5974:0:4533
+5975:1:2656
+5976:0:4533
+5977:2:3975
+5978:2:3976
+5979:2:3980
+5980:2:3981
+5981:2:3989
+5982:2:3990
+5983:2:3994
+5984:2:3995
+5985:2:4003
+5986:2:4008
+5987:2:4012
+5988:2:4013
+5989:2:4021
+5990:2:4022
+5991:2:4026
+5992:2:4027
+5993:2:4021
+5994:2:4022
+5995:2:4026
+5996:2:4027
+5997:2:4035
+5998:2:4040
+5999:2:4041
+6000:2:4052
+6001:2:4060
+6002:2:4061
+6003:2:4065
+6004:2:4070
+6005:2:4071
+6006:2:4082
+6007:2:4083
+6008:2:4084
+6009:2:4082
+6010:2:4083
+6011:2:4084
+6012:2:4095
+6013:2:4103
+6014:0:4533
+6015:2:3127
+6016:0:4533
+6017:2:4109
+6018:2:4110
+6019:2:4114
+6020:2:4115
+6021:2:4123
+6022:2:4124
+6023:2:4128
+6024:2:4129
+6025:2:4137
+6026:2:4142
+6027:2:4146
+6028:2:4147
+6029:2:4155
+6030:2:4156
+6031:2:4160
+6032:2:4161
+6033:2:4155
+6034:2:4156
+6035:2:4160
+6036:2:4161
+6037:2:4169
+6038:2:4174
+6039:2:4175
+6040:2:4186
+6041:2:4194
+6042:2:4195
+6043:2:4199
+6044:2:4204
+6045:2:4205
+6046:2:4216
+6047:2:4217
+6048:2:4218
+6049:2:4216
+6050:2:4217
+6051:2:4218
+6052:2:4229
+6053:0:4533
+6054:2:3127
+6055:0:4533
+6056:1:3066
+6057:1:3073
+6058:1:3074
+6059:1:3081
+6060:1:3086
+6061:1:3093
+6062:1:3094
+6063:1:3093
+6064:1:3094
+6065:1:3101
+6066:1:3105
+6067:0:4533
+6068:2:3975
+6069:2:3976
+6070:2:3980
+6071:2:3981
+6072:2:3989
+6073:2:3990
+6074:2:3994
+6075:2:3995
+6076:2:4003
+6077:2:4008
+6078:2:4012
+6079:2:4013
+6080:2:4021
+6081:2:4022
+6082:2:4026
+6083:2:4027
+6084:2:4021
+6085:2:4022
+6086:2:4026
+6087:2:4027
+6088:2:4035
+6089:2:4040
+6090:2:4041
+6091:2:4052
+6092:2:4060
+6093:2:4061
+6094:2:4065
+6095:2:4070
+6096:2:4071
+6097:2:4082
+6098:2:4083
+6099:2:4084
+6100:2:4082
+6101:2:4083
+6102:2:4084
+6103:2:4095
+6104:2:4103
+6105:0:4533
+6106:2:3127
+6107:0:4533
+6108:2:4109
+6109:2:4110
+6110:2:4114
+6111:2:4115
+6112:2:4123
+6113:2:4124
+6114:2:4128
+6115:2:4129
+6116:2:4137
+6117:2:4142
+6118:2:4146
+6119:2:4147
+6120:2:4155
+6121:2:4156
+6122:2:4160
+6123:2:4161
+6124:2:4155
+6125:2:4156
+6126:2:4160
+6127:2:4161
+6128:2:4169
+6129:2:4174
+6130:2:4175
+6131:2:4186
+6132:2:4194
+6133:2:4195
+6134:2:4199
+6135:2:4204
+6136:2:4205
+6137:2:4216
+6138:2:4217
+6139:2:4218
+6140:2:4216
+6141:2:4217
+6142:2:4218
+6143:2:4229
+6144:0:4533
+6145:2:3127
+6146:0:4533
+6147:1:2658
+6148:1:2659
+6149:0:4533
+6150:1:11
+6151:0:4533
+6152:2:3975
+6153:2:3976
+6154:2:3980
+6155:2:3981
+6156:2:3989
+6157:2:3990
+6158:2:3994
+6159:2:3995
+6160:2:4003
+6161:2:4008
+6162:2:4012
+6163:2:4013
+6164:2:4021
+6165:2:4022
+6166:2:4026
+6167:2:4027
+6168:2:4021
+6169:2:4022
+6170:2:4026
+6171:2:4027
+6172:2:4035
+6173:2:4040
+6174:2:4041
+6175:2:4052
+6176:2:4060
+6177:2:4061
+6178:2:4065
+6179:2:4070
+6180:2:4071
+6181:2:4082
+6182:2:4083
+6183:2:4084
+6184:2:4082
+6185:2:4083
+6186:2:4084
+6187:2:4095
+6188:2:4103
+6189:0:4533
+6190:2:3127
+6191:0:4533
+6192:2:4109
+6193:2:4110
+6194:2:4114
+6195:2:4115
+6196:2:4123
+6197:2:4124
+6198:2:4128
+6199:2:4129
+6200:2:4137
+6201:2:4142
+6202:2:4146
+6203:2:4147
+6204:2:4155
+6205:2:4156
+6206:2:4160
+6207:2:4161
+6208:2:4155
+6209:2:4156
+6210:2:4160
+6211:2:4161
+6212:2:4169
+6213:2:4174
+6214:2:4175
+6215:2:4186
+6216:2:4194
+6217:2:4195
+6218:2:4199
+6219:2:4204
+6220:2:4205
+6221:2:4216
+6222:2:4217
+6223:2:4218
+6224:2:4216
+6225:2:4217
+6226:2:4218
+6227:2:4229
+6228:0:4533
+6229:2:3127
+6230:0:4533
+6231:1:2660
+6232:1:2664
+6233:1:2665
+6234:1:2669
+6235:1:2673
+6236:1:2674
+6237:1:2678
+6238:1:2686
+6239:1:2687
+6240:1:2691
+6241:1:2695
+6242:1:2696
+6243:1:2691
+6244:1:2695
+6245:1:2696
+6246:1:2700
+6247:1:2707
+6248:1:2714
+6249:1:2715
+6250:1:2722
+6251:1:2727
+6252:1:2734
+6253:1:2735
+6254:1:2734
+6255:1:2735
+6256:1:2742
+6257:0:4533
+6258:1:11
+6259:0:4533
+6260:2:3975
+6261:2:3976
+6262:2:3980
+6263:2:3981
+6264:2:3989
+6265:2:3990
+6266:2:3994
+6267:2:3995
+6268:2:4003
+6269:2:4008
+6270:2:4012
+6271:2:4013
+6272:2:4021
+6273:2:4022
+6274:2:4026
+6275:2:4027
+6276:2:4021
+6277:2:4022
+6278:2:4026
+6279:2:4027
+6280:2:4035
+6281:2:4040
+6282:2:4041
+6283:2:4052
+6284:2:4060
+6285:2:4061
+6286:2:4065
+6287:2:4070
+6288:2:4071
+6289:2:4082
+6290:2:4083
+6291:2:4084
+6292:2:4082
+6293:2:4083
+6294:2:4084
+6295:2:4095
+6296:2:4103
+6297:0:4533
+6298:2:3127
+6299:0:4533
+6300:1:2752
+6301:1:2753
+6302:1:2757
+6303:1:2758
+6304:1:2766
+6305:1:2767
+6306:1:2771
+6307:1:2772
+6308:1:2780
+6309:1:2785
+6310:1:2789
+6311:1:2790
+6312:1:2798
+6313:1:2799
+6314:1:2803
+6315:1:2804
+6316:1:2798
+6317:1:2799
+6318:1:2803
+6319:1:2804
+6320:1:2812
+6321:1:2817
+6322:1:2818
+6323:1:2829
+6324:1:2830
+6325:1:2831
+6326:1:2842
+6327:1:2847
+6328:1:2848
+6329:1:2859
+6330:1:2860
+6331:1:2861
+6332:1:2859
+6333:1:2860
+6334:1:2861
+6335:1:2872
+6336:0:4533
+6337:1:11
+6338:0:4533
+6339:1:2881
+6340:1:2882
+6341:1:2886
+6342:1:2887
+6343:1:2895
+6344:1:2896
+6345:1:2900
+6346:1:2901
+6347:1:2909
+6348:1:2914
+6349:1:2918
+6350:1:2919
+6351:1:2927
+6352:1:2928
+6353:1:2932
+6354:1:2933
+6355:1:2927
+6356:1:2928
+6357:1:2932
+6358:1:2933
+6359:1:2941
+6360:1:2946
+6361:1:2947
+6362:1:2958
+6363:1:2959
+6364:1:2960
+6365:1:2971
+6366:1:2976
+6367:1:2977
+6368:1:2988
+6369:1:2989
+6370:1:2990
+6371:1:2988
+6372:1:2989
+6373:1:2990
+6374:1:3001
+6375:1:3008
+6376:1:3012
+6377:0:4533
+6378:1:11
+6379:0:4533
+6380:1:3013
+-1:-1:-1
+6381:0:4533
+6382:1:3021
+6383:0:4533
+6384:1:3109
+6385:0:4533
+6386:1:9
+6387:0:4533
+6388:1:10
+6389:0:4533
+6390:1:11
+6391:0:4533
+6392:1:12
+6393:1:13
+6394:1:17
+6395:1:18
+6396:1:26
+6397:1:27
+6398:1:28
+6399:1:40
+6400:1:45
+6401:1:49
+6402:1:50
+6403:1:58
+6404:1:59
+6405:1:63
+6406:1:64
+6407:1:58
+6408:1:59
+6409:1:63
+6410:1:64
+6411:1:72
+6412:1:77
+6413:1:78
+6414:1:89
+6415:1:90
+6416:1:91
+6417:1:102
+6418:1:107
+6419:1:108
+6420:1:119
+6421:1:120
+6422:1:121
+6423:1:119
+6424:1:120
+6425:1:121
+6426:1:132
+6427:0:4533
+6428:1:11
+6429:0:4533
+6430:1:141
+6431:1:142
+6432:0:4533
+6433:1:11
+6434:0:4533
+6435:1:148
+6436:1:149
+6437:1:153
+6438:1:154
+6439:1:162
+6440:1:163
+6441:1:167
+6442:1:168
+6443:1:176
+6444:1:181
+6445:1:185
+6446:1:186
+6447:1:194
+6448:1:195
+6449:1:199
+6450:1:200
+6451:1:194
+6452:1:195
+6453:1:199
+6454:1:200
+6455:1:208
+6456:1:213
+6457:1:214
+6458:1:225
+6459:1:226
+6460:1:227
+6461:1:238
+6462:1:243
+6463:1:244
+6464:1:255
+6465:1:256
+6466:1:257
+6467:1:255
+6468:1:256
+6469:1:257
+6470:1:268
+6471:0:4533
+6472:1:11
+6473:0:4533
+6474:1:277
+6475:1:278
+6476:1:282
+6477:1:283
+6478:1:291
+6479:1:292
+6480:1:296
+6481:1:297
+6482:1:305
+6483:1:310
+6484:1:314
+6485:1:315
+6486:1:323
+6487:1:324
+6488:1:328
+6489:1:329
+6490:1:323
+6491:1:324
+6492:1:328
+6493:1:329
+6494:1:337
+6495:1:342
+6496:1:343
+6497:1:354
+6498:1:355
+6499:1:356
+6500:1:367
+6501:1:372
+6502:1:373
+6503:1:384
+6504:1:385
+6505:1:386
+6506:1:384
+6507:1:385
+6508:1:386
+6509:1:397
+6510:1:404
+6511:0:4533
+6512:1:11
+6513:0:4533
+6514:1:540
+6515:1:544
+6516:1:545
+6517:1:549
+6518:1:550
+6519:1:558
+6520:1:566
+6521:1:567
+6522:1:571
+6523:1:575
+6524:1:576
+6525:1:571
+6526:1:575
+6527:1:576
+6528:1:580
+6529:1:587
+6530:1:594
+6531:1:595
+6532:1:602
+6533:1:607
+6534:1:614
+6535:1:615
+6536:1:614
+6537:1:615
+6538:1:622
+6539:0:4533
+6540:1:11
+6541:0:4533
+6542:1:632
+6543:1:633
+6544:1:637
+6545:1:638
+6546:1:646
+6547:1:647
+6548:1:651
+6549:1:652
+6550:1:660
+6551:1:665
+6552:1:669
+6553:1:670
+6554:1:678
+6555:1:679
+6556:1:683
+6557:1:684
+6558:1:678
+6559:1:679
+6560:1:683
+6561:1:684
+6562:1:692
+6563:1:697
+6564:1:698
+6565:1:709
+6566:1:710
+6567:1:711
+6568:1:722
+6569:1:727
+6570:1:728
+6571:1:739
+6572:1:740
+6573:1:741
+6574:1:739
+6575:1:740
+6576:1:741
+6577:1:752
+6578:0:4533
+6579:1:11
+6580:0:4533
+6581:1:761
+6582:1:764
+6583:1:765
+6584:0:4533
+6585:1:11
+6586:0:4533
+6587:1:768
+6588:1:769
+6589:1:773
+6590:1:774
+6591:1:782
+6592:1:783
+6593:1:787
+6594:1:788
+6595:1:796
+6596:1:801
+6597:1:805
+6598:1:806
+6599:1:814
+6600:1:815
+6601:1:819
+6602:1:820
+6603:1:814
+6604:1:815
+6605:1:819
+6606:1:820
+6607:1:828
+6608:1:833
+6609:1:834
+6610:1:845
+6611:1:846
+6612:1:847
+6613:1:858
+6614:1:863
+6615:1:864
+6616:1:875
+6617:1:876
+6618:1:877
+6619:1:875
+6620:1:876
+6621:1:877
+6622:1:888
+6623:0:4533
+6624:1:11
+6625:0:4533
+6626:1:1028
+6627:1:1029
+6628:1:1033
+6629:1:1034
+6630:1:1042
+6631:1:1043
+6632:1:1047
+6633:1:1048
+6634:1:1056
+6635:1:1061
+6636:1:1065
+6637:1:1066
+6638:1:1074
+6639:1:1075
+6640:1:1079
+6641:1:1080
+6642:1:1074
+6643:1:1075
+6644:1:1079
+6645:1:1080
+6646:1:1088
+6647:1:1093
+6648:1:1094
+6649:1:1105
+6650:1:1106
+6651:1:1107
+6652:1:1118
+6653:1:1123
+6654:1:1124
+6655:1:1135
+6656:1:1136
+6657:1:1137
+6658:1:1135
+6659:1:1136
+6660:1:1137
+6661:1:1148
+6662:1:1155
+6663:1:1159
+6664:0:4533
+6665:1:11
+6666:0:4533
+6667:1:1160
+6668:1:1161
+6669:1:1165
+6670:1:1166
+6671:1:1174
+6672:1:1175
+6673:1:1176
+6674:1:1188
+6675:1:1193
+6676:1:1197
+6677:1:1198
+6678:1:1206
+6679:1:1207
+6680:1:1211
+6681:1:1212
+6682:1:1206
+6683:1:1207
+6684:1:1211
+6685:1:1212
+6686:1:1220
+6687:1:1225
+6688:1:1226
+6689:1:1237
+6690:1:1238
+6691:1:1239
+6692:1:1250
+6693:1:1255
+6694:1:1256
+6695:1:1267
+6696:1:1268
+6697:1:1269
+6698:1:1267
+6699:1:1268
+6700:1:1269
+6701:1:1280
+6702:0:4533
+6703:1:11
+6704:0:4533
+6705:1:1289
+6706:0:4533
+6707:1:3023
+6708:1:3030
+6709:1:3031
+6710:1:3038
+6711:1:3043
+6712:1:3050
+6713:1:3051
+6714:1:3050
+6715:1:3051
+6716:1:3058
+6717:1:3062
+6718:0:4533
+6719:1:1291
+6720:1:1292
+6721:0:4533
+6722:1:11
+6723:0:4533
+6724:1:1293
+6725:1:1294
+6726:1:1298
+6727:1:1299
+6728:1:1307
+6729:1:1308
+6730:1:1312
+6731:1:1313
+6732:1:1321
+6733:1:1326
+6734:1:1330
+6735:1:1331
+6736:1:1339
+6737:1:1340
+6738:1:1344
+6739:1:1345
+6740:1:1339
+6741:1:1340
+6742:1:1344
+6743:1:1345
+6744:1:1353
+6745:1:1358
+6746:1:1359
+6747:1:1370
+6748:1:1371
+6749:1:1372
+6750:1:1383
+6751:1:1388
+6752:1:1389
+6753:1:1400
+6754:1:1401
+6755:1:1402
+6756:1:1400
+6757:1:1401
+6758:1:1402
+6759:1:1413
+6760:0:4533
+6761:1:11
+6762:0:4533
+6763:1:1422
+6764:1:1423
+6765:1:1427
+6766:1:1428
+6767:1:1436
+6768:1:1437
+6769:1:1441
+6770:1:1442
+6771:1:1450
+6772:1:1455
+6773:1:1459
+6774:1:1460
+6775:1:1468
+6776:1:1469
+6777:1:1473
+6778:1:1474
+6779:1:1468
+6780:1:1469
+6781:1:1473
+6782:1:1474
+6783:1:1482
+6784:1:1487
+6785:1:1488
+6786:1:1499
+6787:1:1500
+6788:1:1501
+6789:1:1512
+6790:1:1517
+6791:1:1518
+6792:1:1529
+6793:1:1530
+6794:1:1531
+6795:1:1529
+6796:1:1530
+6797:1:1531
+6798:1:1542
+6799:1:1549
+6800:1:1553
+6801:0:4533
+6802:1:11
+6803:0:4533
+6804:1:1554
+6805:1:1558
+6806:1:1559
+6807:1:1563
+6808:1:1564
+6809:1:1572
+6810:1:1580
+6811:1:1581
+6812:1:1585
+6813:1:1589
+6814:1:1590
+6815:1:1585
+6816:1:1589
+6817:1:1590
+6818:1:1594
+6819:1:1601
+6820:1:1608
+6821:1:1609
+6822:1:1616
+6823:1:1621
+6824:1:1628
+6825:1:1629
+6826:1:1628
+6827:1:1629
+6828:1:1636
+6829:0:4533
+6830:1:11
+6831:0:4533
+6832:1:1646
+6833:1:1647
+6834:1:1651
+6835:1:1652
+6836:1:1660
+6837:1:1661
+6838:1:1665
+6839:1:1666
+6840:1:1674
+6841:1:1679
+6842:1:1683
+6843:1:1684
+6844:1:1692
+6845:1:1693
+6846:1:1697
+6847:1:1698
+6848:1:1692
+6849:1:1693
+6850:1:1697
+6851:1:1698
+6852:1:1706
+6853:1:1711
+6854:1:1712
+6855:1:1723
+6856:1:1724
+6857:1:1725
+6858:1:1736
+6859:1:1741
+6860:1:1742
+6861:1:1753
+6862:1:1754
+6863:1:1755
+6864:1:1753
+6865:1:1754
+6866:1:1755
+6867:1:1766
+6868:0:4533
+6869:1:11
+6870:0:4533
+6871:1:1775
+6872:1:1776
+6873:1:1780
+6874:1:1781
+6875:1:1789
+6876:1:1790
+6877:1:1794
+6878:1:1795
+6879:1:1803
+6880:1:1808
+6881:1:1812
+6882:1:1813
+6883:1:1821
+6884:1:1822
+6885:1:1826
+6886:1:1827
+6887:1:1821
+6888:1:1822
+6889:1:1826
+6890:1:1827
+6891:1:1835
+6892:1:1840
+6893:1:1841
+6894:1:1852
+6895:1:1853
+6896:1:1854
+6897:1:1865
+6898:1:1870
+6899:1:1871
+6900:1:1882
+6901:1:1883
+6902:1:1884
+6903:1:1882
+6904:1:1883
+6905:1:1884
+6906:1:1895
+6907:1:1902
+6908:1:1906
+6909:0:4533
+6910:1:11
+6911:0:4533
+6912:1:1907
+6913:1:1908
+6914:1:1912
+6915:1:1913
+6916:1:1921
+6917:1:1922
+6918:1:1923
+6919:1:1935
+6920:1:1940
+6921:1:1944
+6922:1:1945
+6923:1:1953
+6924:1:1954
+6925:1:1958
+6926:1:1959
+6927:1:1953
+6928:1:1954
+6929:1:1958
+6930:1:1959
+6931:1:1967
+6932:1:1972
+6933:1:1973
+6934:1:1984
+6935:1:1985
+6936:1:1986
+6937:1:1997
+6938:1:2002
+6939:1:2003
+6940:1:2014
+6941:1:2015
+6942:1:2016
+6943:1:2014
+6944:1:2015
+6945:1:2016
+6946:1:2027
+6947:0:4533
+6948:1:11
+6949:0:4533
+6950:1:2036
+6951:1:2037
+6952:0:4533
+6953:1:11
+6954:0:4533
+6955:1:2043
+6956:1:2044
+6957:1:2048
+6958:1:2049
+6959:1:2057
+6960:1:2058
+6961:1:2062
+6962:1:2063
+6963:1:2071
+6964:1:2076
+6965:1:2080
+6966:1:2081
+6967:1:2089
+6968:1:2090
+6969:1:2094
+6970:1:2095
+6971:1:2089
+6972:1:2090
+6973:1:2094
+6974:1:2095
+6975:1:2103
+6976:1:2108
+6977:1:2109
+6978:1:2120
+6979:1:2121
+6980:1:2122
+6981:1:2133
+6982:1:2138
+6983:1:2139
+6984:1:2150
+6985:1:2151
+6986:1:2152
+6987:1:2150
+6988:1:2151
+6989:1:2152
+6990:1:2163
+6991:0:4533
+6992:1:11
+6993:0:4533
+6994:1:2172
+6995:1:2173
+6996:1:2177
+6997:1:2178
+6998:1:2186
+6999:1:2187
+7000:1:2191
+7001:1:2192
+7002:1:2200
+7003:1:2205
+7004:1:2209
+7005:1:2210
+7006:1:2218
+7007:1:2219
+7008:1:2223
+7009:1:2224
+7010:1:2218
+7011:1:2219
+7012:1:2223
+7013:1:2224
+7014:1:2232
+7015:1:2237
+7016:1:2238
+7017:1:2249
+7018:1:2250
+7019:1:2251
+7020:1:2262
+7021:1:2267
+7022:1:2268
+7023:1:2279
+7024:1:2280
+7025:1:2281
+7026:1:2279
+7027:1:2280
+7028:1:2281
+7029:1:2292
+7030:1:2299
+7031:0:4533
+7032:1:11
+7033:0:4533
+7034:1:2435
+7035:1:2439
+7036:1:2440
+7037:1:2444
+7038:1:2445
+7039:1:2453
+7040:1:2461
+7041:1:2462
+7042:1:2466
+7043:1:2470
+7044:1:2471
+7045:1:2466
+7046:1:2470
+7047:1:2471
+7048:1:2475
+7049:1:2482
+7050:1:2489
+7051:1:2490
+7052:1:2497
+7053:1:2502
+7054:1:2509
+7055:1:2510
+7056:1:2509
+7057:1:2510
+7058:1:2517
+7059:0:4533
+7060:1:11
+7061:0:4533
+7062:1:2527
+7063:1:2528
+7064:1:2532
+7065:1:2533
+7066:1:2541
+7067:1:2542
+7068:1:2546
+7069:1:2547
+7070:1:2555
+7071:1:2560
+7072:1:2564
+7073:1:2565
+7074:1:2573
+7075:1:2574
+7076:1:2578
+7077:1:2579
+7078:1:2573
+7079:1:2574
+7080:1:2578
+7081:1:2579
+7082:1:2587
+7083:1:2592
+7084:1:2593
+7085:1:2604
+7086:1:2605
+7087:1:2606
+7088:1:2617
+7089:1:2622
+7090:1:2623
+7091:1:2634
+7092:1:2635
+7093:1:2636
+7094:1:2634
+7095:1:2635
+7096:1:2636
+7097:1:2647
+7098:0:4533
+7099:1:11
+7100:0:4533
+7101:1:2656
+7102:0:4533
+7103:1:3066
+7104:1:3073
+7105:1:3074
+7106:1:3081
+7107:1:3086
+7108:1:3093
+7109:1:3094
+7110:1:3093
+7111:1:3094
+7112:1:3101
+7113:1:3105
+7114:0:4533
+7115:1:2658
+7116:1:2659
+7117:0:4533
+7118:1:11
+7119:0:4533
+7120:1:2660
+7121:1:2664
+7122:1:2665
+7123:1:2669
+7124:1:2673
+7125:1:2674
+7126:1:2678
+7127:1:2686
+7128:1:2687
+7129:1:2691
+7130:1:2695
+7131:1:2696
+7132:1:2691
+7133:1:2695
+7134:1:2696
+7135:1:2700
+7136:1:2707
+7137:1:2714
+7138:1:2715
+7139:1:2722
+7140:1:2727
+7141:1:2734
+7142:1:2735
+7143:1:2734
+7144:1:2735
+7145:1:2742
+7146:0:4533
+7147:1:11
+7148:0:4533
+7149:1:2752
+7150:1:2753
+7151:1:2757
+7152:1:2758
+7153:1:2766
+7154:1:2767
+7155:1:2771
+7156:1:2772
+7157:1:2780
+7158:1:2785
+7159:1:2789
+7160:1:2790
+7161:1:2798
+7162:1:2799
+7163:1:2803
+7164:1:2804
+7165:1:2798
+7166:1:2799
+7167:1:2803
+7168:1:2804
+7169:1:2812
+7170:1:2817
+7171:1:2818
+7172:1:2829
+7173:1:2830
+7174:1:2831
+7175:1:2842
+7176:1:2847
+7177:1:2848
+7178:1:2859
+7179:1:2860
+7180:1:2861
+7181:1:2859
+7182:1:2860
+7183:1:2861
+7184:1:2872
+7185:0:4533
+7186:2:4109
+7187:2:4110
+7188:2:4114
+7189:2:4115
+7190:2:4123
+7191:2:4124
+7192:2:4128
+7193:2:4129
+7194:2:4137
+7195:2:4142
+7196:2:4146
+7197:2:4147
+7198:2:4155
+7199:2:4156
+7200:2:4160
+7201:2:4161
+7202:2:4155
+7203:2:4156
+7204:2:4160
+7205:2:4161
+7206:2:4169
+7207:2:4174
+7208:2:4175
+7209:2:4186
+7210:2:4194
+7211:2:4195
+7212:2:4199
+7213:2:4204
+7214:2:4205
+7215:2:4216
+7216:2:4217
+7217:2:4218
+7218:2:4216
+7219:2:4217
+7220:2:4218
+7221:2:4229
+7222:0:4533
+7223:2:3127
+7224:0:4533
+7225:1:11
+7226:0:4533
+7227:1:2881
+7228:1:2882
+7229:1:2886
+7230:1:2887
+7231:1:2895
+7232:1:2896
+7233:1:2900
+7234:1:2901
+7235:1:2909
+7236:1:2914
+7237:1:2918
+7238:1:2919
+7239:1:2927
+7240:1:2928
+7241:1:2932
+7242:1:2933
+7243:1:2927
+7244:1:2928
+7245:1:2932
+7246:1:2933
+7247:1:2941
+7248:1:2946
+7249:1:2947
+7250:1:2958
+7251:1:2959
+7252:1:2960
+7253:1:2971
+7254:1:2976
+7255:1:2977
+7256:1:2988
+7257:1:2989
+7258:1:2990
+7259:1:2988
+7260:1:2989
+7261:1:2990
+7262:1:3001
+7263:1:3008
+7264:1:3012
+7265:0:4533
+7266:2:3975
+7267:2:3976
+7268:2:3980
+7269:2:3981
+7270:2:3989
+7271:2:3990
+7272:2:3994
+7273:2:3995
+7274:2:4003
+7275:2:4008
+7276:2:4012
+7277:2:4013
+7278:2:4021
+7279:2:4022
+7280:2:4026
+7281:2:4027
+7282:2:4021
+7283:2:4022
+7284:2:4026
+7285:2:4027
+7286:2:4035
+7287:2:4040
+7288:2:4041
+7289:2:4052
+7290:2:4060
+7291:2:4061
+7292:2:4065
+7293:2:4070
+7294:2:4071
+7295:2:4082
+7296:2:4083
+7297:2:4084
+7298:2:4082
+7299:2:4083
+7300:2:4084
+7301:2:4095
+7302:2:4103
+7303:0:4533
+7304:2:3127
+7305:0:4533
+7306:1:11
+7307:0:4533
+7308:2:4109
+7309:2:4110
+7310:2:4114
+7311:2:4115
+7312:2:4123
+7313:2:4124
+7314:2:4128
+7315:2:4129
+7316:2:4137
+7317:2:4142
+7318:2:4146
+7319:2:4147
+7320:2:4155
+7321:2:4156
+7322:2:4160
+7323:2:4161
+7324:2:4155
+7325:2:4156
+7326:2:4160
+7327:2:4161
+7328:2:4169
+7329:2:4174
+7330:2:4175
+7331:2:4186
+7332:2:4194
+7333:2:4195
+7334:2:4199
+7335:2:4204
+7336:2:4205
+7337:2:4216
+7338:2:4217
+7339:2:4218
+7340:2:4216
+7341:2:4217
+7342:2:4218
+7343:2:4229
+7344:0:4533
+7345:2:3127
+7346:0:4533
+7347:2:3975
+7348:2:3976
+7349:2:3980
+7350:2:3981
+7351:2:3989
+7352:2:3990
+7353:2:3994
+7354:2:3995
+7355:2:4003
+7356:2:4008
+7357:2:4012
+7358:2:4013
+7359:2:4021
+7360:2:4022
+7361:2:4026
+7362:2:4027
+7363:2:4021
+7364:2:4022
+7365:2:4026
+7366:2:4027
+7367:2:4035
+7368:2:4040
+7369:2:4041
+7370:2:4052
+7371:2:4060
+7372:2:4061
+7373:2:4065
+7374:2:4070
+7375:2:4071
+7376:2:4082
+7377:2:4083
+7378:2:4084
+7379:2:4082
+7380:2:4083
+7381:2:4084
+7382:2:4095
+7383:2:4103
+7384:0:4533
+7385:2:3127
+7386:0:4533
+7387:1:3013
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/.input.define b/formal-model/urcu-controldataflow-intel-ipi-compress/.input.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/.input.spin b/formal-model/urcu-controldataflow-intel-ipi-compress/.input.spin
new file mode 100644 (file)
index 0000000..6cccb27
--- /dev/null
@@ -0,0 +1,1341 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
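+/*
+ * Illustrative sketch, not part of the verified model (TOK_A and TOK_B are
+ * hypothetical token bits): an instruction B depending on instruction A would
+ * be guarded roughly as
+ *
+ *   :: CONSUME_TOKENS(state, 0, TOK_A) ->       (A has no input token)
+ *          ...execute A...;
+ *          PRODUCE_TOKENS(state, TOK_A);
+ *   :: CONSUME_TOKENS(state, TOK_A, TOK_B) ->   (B waits for A's token)
+ *          ...execute B...;
+ *          PRODUCE_TOKENS(state, TOK_B);
+ *
+ * The second argument lists the tokens an instruction requires; the third
+ * lists the tokens whose presence inhibits it (typically its own output
+ * token, so it executes only once).
+ */
+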
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it remains required when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can still be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
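+
+/*
+ * Worked example (illustrative only, S1..S3 are hypothetical statements):
+ *
+ *   S1:  x = a;
+ *   S2:  b = x;   RAW on x with S1 (S2 reads the value S1 wrote)
+ *   S3:  x = c;   WAR on x with S2, WAW on x with S1
+ *
+ * In the OOO memory model, S2 must observe S1's value of x before S3
+ * overwrites it, and S3's write must not be overridden by S1's.
+ */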
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. Cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
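+/*
+ * Usage sketch (illustrative; "foo" is a hypothetical variable, not part of
+ * this model):
+ *
+ *   DECLARE_CACHED_VAR(byte, foo);        (memory copy: mem_foo)
+ *   DECLARE_PROC_CACHED_VAR(byte, foo);   (local copy cached_foo + dirty bit)
+ *   INIT_CACHED_VAR(foo, 0);
+ *   INIT_PROC_CACHED_VAR(foo, 0);
+ *
+ *   WRITE_CACHED_VAR(foo, 1);             (writes cached_foo, marks it dirty)
+ *   CACHE_WRITE_TO_MEM(foo, get_pid());   (commits to mem_foo if dirty)
+ *   CACHE_READ_FROM_MEM(foo, get_pid());  (refreshes cached_foo if clean)
+ */
+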
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops
+                * waiting for the reader after sending barrier requests, while
+                * the reader keeps servicing them without ever continuing
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
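+
+/*
+ * Summary of the REMOTE_BARRIERS handshake (illustrative, not additional model
+ * code): smp_mb_send() issues a full smp_mb(), then for each reader sets
+ * reader_barrier[i] = 1 and busy-waits until the reader clears it, then issues
+ * a final smp_mb(). On the read side, smp_mb_recv() either services a pending
+ * request with smp_mb() and clears the flag, or breaks out of the loop,
+ * modeling a reader that stops handling barrier requests.
+ */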
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
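+/*
+ * Reading aid (illustrative summary, not additional model code): in
+ * PROCEDURE_READ_LOCK below, step A reads the per-reader nesting counter,
+ * step B branches on whether this is the outermost nesting level, the IF-TRUE
+ * path snapshots the global urcu_gp_ctr into urcu_active_readers[], and the
+ * ELSE path simply increments the nesting count, mirroring rcu_read_lock().
+ */
+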
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
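+
+/*
+ * PROCEDURE_READ_LOCK and PROCEDURE_READ_UNLOCK mirror rcu_read_lock() and
+ * rcu_read_unlock(): the lock procedure snapshots the per-reader count and
+ * either copies the global grace-period counter (outermost lock) or
+ * increments the nesting count, while the unlock procedure decrements it.
+ * The token parameters let callers chain these steps into the surrounding
+ * dependency graph.
+ */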
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier only executes at points
+                * where the execution order so far appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution into the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
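+/*
+ * The reader process loops forever; each iteration runs urcu_one_read(),
+ * i.e. two consecutive read-side critical sections (the first with a nested
+ * lock/unlock, the second an unrolled repetition) which read rcu_ptr and the
+ * rcu_data[] entry it points to, recording the results in ptr_read_*[] and
+ * data_read_*[] for later verification.
+ */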
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
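+/*
+ * The writer tokens above encode one update/reclaim pass, roughly: write a
+ * new data entry (WRITE_DATA), wmb, exchange the RCU pointer (WRITE_XCHG_PTR),
+ * full barrier, then two flips of the global grace-period counter parity
+ * (FIRST/SECOND_READ_GP and WRITE_GP), each followed by busy-waiting until
+ * reader 0 is either not in a read-side critical section or observes the new
+ * parity (FIRST/SECOND_WAIT), a second full barrier, and finally poisoning of
+ * the old entry (WRITE_FREE).
+ */
+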
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/.input.spin.trail b/formal-model/urcu-controldataflow-intel-ipi-compress/.input.spin.trail
new file mode 100644 (file)
index 0000000..e9b06b3
--- /dev/null
@@ -0,0 +1,1699 @@
+-2:3:-2
+-4:-4:-4
+1:0:3997
+2:2:2536
+3:2:2541
+4:2:2545
+5:2:2553
+6:2:2557
+7:2:2561
+8:0:3997
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:3997
+16:3:3967
+17:3:3970
+18:3:3977
+19:3:3984
+20:3:3987
+21:3:3991
+22:3:3992
+23:0:3997
+24:3:3994
+25:0:3997
+26:2:2565
+27:0:3997
+28:2:2571
+29:0:3997
+30:2:2572
+31:0:3997
+32:2:2573
+33:0:3997
+34:2:2574
+35:0:3997
+36:2:2575
+37:0:3997
+38:2:2576
+39:2:2577
+40:2:2581
+41:2:2582
+42:2:2590
+43:2:2591
+44:2:2595
+45:2:2596
+46:2:2604
+47:2:2609
+48:2:2613
+49:2:2614
+50:2:2622
+51:2:2623
+52:2:2627
+53:2:2628
+54:2:2622
+55:2:2623
+56:2:2627
+57:2:2628
+58:2:2636
+59:2:2641
+60:2:2648
+61:2:2649
+62:2:2656
+63:2:2661
+64:2:2668
+65:2:2669
+66:2:2668
+67:2:2669
+68:2:2676
+69:2:2686
+70:0:3997
+71:2:2575
+72:0:3997
+73:2:2690
+74:2:2694
+75:2:2695
+76:2:2699
+77:2:2703
+78:2:2704
+79:2:2708
+80:2:2716
+81:2:2717
+82:2:2721
+83:2:2725
+84:2:2726
+85:2:2721
+86:2:2722
+87:2:2730
+88:0:3997
+89:2:2575
+90:0:3997
+91:2:2738
+92:2:2739
+93:2:2740
+94:0:3997
+95:2:2575
+96:0:3997
+97:2:2745
+98:0:3997
+99:2:3572
+100:2:3573
+101:2:3577
+102:2:3581
+103:2:3582
+104:2:3586
+105:2:3591
+106:2:3599
+107:2:3603
+108:2:3604
+109:2:3599
+110:2:3603
+111:2:3604
+112:2:3608
+113:2:3615
+114:2:3622
+115:2:3623
+116:2:3630
+117:2:3635
+118:2:3642
+119:2:3643
+120:2:3642
+121:2:3643
+122:2:3650
+123:2:3654
+124:0:3997
+125:2:3659
+126:0:3997
+127:2:3660
+128:0:3997
+129:2:3661
+130:0:3997
+131:2:3662
+132:0:3997
+133:1:29
+134:0:3997
+135:1:35
+136:0:3997
+137:1:36
+138:0:3997
+139:2:3663
+140:0:3997
+141:1:37
+142:0:3997
+143:2:3662
+144:0:3997
+145:1:38
+146:0:3997
+147:2:3663
+148:0:3997
+149:1:39
+150:0:3997
+151:2:3662
+152:0:3997
+153:1:40
+154:0:3997
+155:2:3663
+156:0:3997
+157:1:41
+158:0:3997
+159:1:42
+160:0:3997
+161:1:43
+162:0:3997
+163:2:3662
+164:0:3997
+165:1:44
+166:0:3997
+167:2:3663
+168:0:3997
+169:1:53
+170:0:3997
+171:2:3662
+172:0:3997
+173:1:57
+174:1:58
+175:1:62
+176:1:66
+177:1:67
+178:1:71
+179:1:79
+180:1:80
+181:1:84
+182:1:88
+183:1:89
+184:1:84
+185:1:88
+186:1:89
+187:1:93
+188:1:100
+189:1:107
+190:1:108
+191:1:115
+192:1:120
+193:1:127
+194:1:128
+195:1:127
+196:1:128
+197:1:135
+198:1:139
+199:0:3997
+200:2:3663
+201:0:3997
+202:1:144
+203:0:3997
+204:2:3664
+205:0:3997
+206:2:3669
+207:0:3997
+208:2:3670
+209:0:3997
+210:2:3678
+211:2:3679
+212:2:3683
+213:2:3687
+214:2:3688
+215:2:3692
+216:2:3700
+217:2:3701
+218:2:3705
+219:2:3709
+220:2:3710
+221:2:3705
+222:2:3709
+223:2:3710
+224:2:3714
+225:2:3721
+226:2:3728
+227:2:3729
+228:2:3736
+229:2:3741
+230:2:3748
+231:2:3749
+232:2:3748
+233:2:3749
+234:2:3756
+235:2:3760
+236:0:3997
+237:2:2747
+238:2:3553
+239:0:3997
+240:2:2575
+241:0:3997
+242:2:2748
+243:0:3997
+244:2:2575
+245:0:3997
+246:2:2751
+247:2:2752
+248:2:2756
+249:2:2757
+250:2:2765
+251:2:2766
+252:2:2770
+253:2:2771
+254:2:2779
+255:2:2784
+256:2:2788
+257:2:2789
+258:2:2797
+259:2:2798
+260:2:2802
+261:2:2803
+262:2:2797
+263:2:2798
+264:2:2802
+265:2:2803
+266:2:2811
+267:2:2816
+268:2:2823
+269:2:2824
+270:2:2831
+271:2:2836
+272:2:2843
+273:2:2844
+274:2:2843
+275:2:2844
+276:2:2851
+277:2:2860
+278:0:3997
+279:2:2575
+280:0:3997
+281:2:2864
+282:2:2865
+283:2:2866
+284:2:2878
+285:2:2879
+286:2:2883
+287:2:2884
+288:2:2892
+289:2:2897
+290:2:2901
+291:2:2902
+292:2:2910
+293:2:2911
+294:2:2915
+295:2:2916
+296:2:2910
+297:2:2911
+298:2:2915
+299:2:2916
+300:2:2924
+301:2:2929
+302:2:2936
+303:2:2937
+304:2:2944
+305:2:2949
+306:2:2956
+307:2:2957
+308:2:2956
+309:2:2957
+310:2:2964
+311:2:2977
+312:2:2978
+313:0:3997
+314:2:2575
+315:0:3997
+316:2:3091
+317:2:3092
+318:2:3096
+319:2:3097
+320:2:3105
+321:2:3106
+322:2:3110
+323:2:3111
+324:2:3119
+325:2:3124
+326:2:3128
+327:2:3129
+328:2:3137
+329:2:3138
+330:2:3142
+331:2:3143
+332:2:3137
+333:2:3138
+334:2:3142
+335:2:3143
+336:2:3151
+337:2:3156
+338:2:3163
+339:2:3164
+340:2:3171
+341:2:3176
+342:2:3183
+343:2:3184
+344:2:3183
+345:2:3184
+346:2:3191
+347:0:3997
+348:2:2575
+349:0:3997
+350:2:3202
+351:2:3203
+352:2:3207
+353:2:3208
+354:2:3216
+355:2:3217
+356:2:3221
+357:2:3222
+358:2:3230
+359:2:3235
+360:2:3239
+361:2:3240
+362:2:3248
+363:2:3249
+364:2:3253
+365:2:3254
+366:2:3248
+367:2:3249
+368:2:3253
+369:2:3254
+370:2:3262
+371:2:3267
+372:2:3274
+373:2:3275
+374:2:3282
+375:2:3287
+376:2:3294
+377:2:3295
+378:2:3294
+379:2:3295
+380:2:3302
+381:2:3311
+382:0:3997
+383:2:2575
+384:0:3997
+385:2:3315
+386:2:3316
+387:2:3317
+388:2:3329
+389:2:3330
+390:2:3334
+391:2:3335
+392:2:3343
+393:2:3348
+394:2:3352
+395:2:3353
+396:2:3361
+397:2:3362
+398:2:3366
+399:2:3367
+400:2:3361
+401:2:3362
+402:2:3366
+403:2:3367
+404:2:3375
+405:2:3380
+406:2:3387
+407:2:3388
+408:2:3395
+409:2:3400
+410:2:3407
+411:2:3408
+412:2:3407
+413:2:3408
+414:2:3415
+415:2:3427
+416:2:3428
+417:0:3997
+418:2:2575
+419:0:3997
+420:2:3541
+421:0:3997
+422:2:3770
+423:2:3771
+424:2:3775
+425:2:3779
+426:2:3780
+427:2:3784
+428:2:3792
+429:2:3793
+430:2:3797
+431:2:3801
+432:2:3802
+433:2:3797
+434:2:3801
+435:2:3802
+436:2:3806
+437:2:3813
+438:2:3820
+439:2:3821
+440:2:3828
+441:2:3833
+442:2:3840
+443:2:3841
+444:2:3840
+445:2:3841
+446:2:3848
+447:2:3852
+448:0:3997
+449:2:3857
+450:0:3997
+451:2:3858
+452:0:3997
+453:2:3859
+454:0:3997
+455:2:3860
+456:0:3997
+457:1:53
+458:0:3997
+459:2:3861
+460:0:3997
+461:1:57
+462:1:58
+463:1:62
+464:1:66
+465:1:67
+466:1:71
+467:1:79
+468:1:80
+469:1:84
+470:1:88
+471:1:89
+472:1:84
+473:1:88
+474:1:89
+475:1:93
+476:1:100
+477:1:107
+478:1:108
+479:1:115
+480:1:120
+481:1:127
+482:1:128
+483:1:127
+484:1:128
+485:1:135
+486:1:139
+487:0:3997
+488:2:3860
+489:0:3997
+490:1:144
+491:0:3997
+492:2:3861
+493:0:3997
+494:2:3862
+495:0:3997
+496:2:3867
+497:0:3997
+498:2:3868
+499:0:3997
+500:2:3876
+501:2:3877
+502:2:3881
+503:2:3885
+504:2:3886
+505:2:3890
+506:2:3898
+507:2:3899
+508:2:3903
+509:2:3907
+510:2:3908
+511:2:3903
+512:2:3907
+513:2:3908
+514:2:3912
+515:2:3919
+516:2:3926
+517:2:3927
+518:2:3934
+519:2:3939
+520:2:3946
+521:2:3947
+522:2:3946
+523:2:3947
+524:2:3954
+525:2:3958
+526:0:3997
+527:2:3543
+528:2:3553
+529:0:3997
+530:2:2575
+531:0:3997
+532:2:3544
+533:2:3545
+534:0:3997
+535:2:2575
+536:0:3997
+537:2:3549
+538:0:3997
+539:2:3557
+540:0:3997
+541:2:2572
+542:0:3997
+543:2:2573
+544:0:3997
+545:2:2574
+546:0:3997
+547:2:2575
+548:0:3997
+549:2:2576
+550:2:2577
+551:2:2581
+552:2:2582
+553:2:2590
+554:2:2591
+555:2:2595
+556:2:2596
+557:2:2604
+558:2:2609
+559:2:2613
+560:2:2614
+561:2:2622
+562:2:2623
+563:2:2624
+564:2:2622
+565:2:2623
+566:2:2627
+567:2:2628
+568:2:2636
+569:2:2641
+570:2:2648
+571:2:2649
+572:2:2656
+573:2:2661
+574:2:2668
+575:2:2669
+576:2:2668
+577:2:2669
+578:2:2676
+579:2:2686
+580:0:3997
+581:2:2575
+582:0:3997
+583:2:2690
+584:2:2694
+585:2:2695
+586:2:2699
+587:2:2703
+588:2:2704
+589:2:2708
+590:2:2716
+591:2:2717
+592:2:2721
+593:2:2722
+594:2:2721
+595:2:2725
+596:2:2726
+597:2:2730
+598:0:3997
+599:2:2575
+600:0:3997
+601:2:2738
+602:2:2739
+603:2:2740
+604:0:3997
+605:2:2575
+606:0:3997
+607:2:2745
+608:0:3997
+609:2:3572
+610:2:3573
+611:2:3577
+612:2:3581
+613:2:3582
+614:2:3586
+615:2:3591
+616:2:3599
+617:2:3603
+618:2:3604
+619:2:3599
+620:2:3603
+621:2:3604
+622:2:3608
+623:2:3615
+624:2:3622
+625:2:3623
+626:2:3630
+627:2:3635
+628:2:3642
+629:2:3643
+630:2:3642
+631:2:3643
+632:2:3650
+633:2:3654
+634:0:3997
+635:2:3659
+636:0:3997
+637:2:3660
+638:0:3997
+639:2:3661
+640:0:3997
+641:2:3662
+642:0:3997
+643:1:53
+644:0:3997
+645:2:3663
+646:0:3997
+647:1:57
+648:1:58
+649:1:62
+650:1:66
+651:1:67
+652:1:71
+653:1:79
+654:1:80
+655:1:84
+656:1:88
+657:1:89
+658:1:84
+659:1:88
+660:1:89
+661:1:93
+662:1:100
+663:1:107
+664:1:108
+665:1:115
+666:1:120
+667:1:127
+668:1:128
+669:1:127
+670:1:128
+671:1:135
+672:1:139
+673:0:3997
+674:2:3662
+675:0:3997
+676:1:144
+677:0:3997
+678:2:3663
+679:0:3997
+680:2:3664
+681:0:3997
+682:2:3669
+683:0:3997
+684:2:3670
+685:0:3997
+686:2:3678
+687:2:3679
+688:2:3683
+689:2:3687
+690:2:3688
+691:2:3692
+692:2:3700
+693:2:3701
+694:2:3705
+695:2:3709
+696:2:3710
+697:2:3705
+698:2:3709
+699:2:3710
+700:2:3714
+701:2:3721
+702:2:3728
+703:2:3729
+704:2:3736
+705:2:3741
+706:2:3748
+707:2:3749
+708:2:3748
+709:2:3749
+710:2:3756
+711:2:3760
+712:0:3997
+713:2:2747
+714:2:3553
+715:0:3997
+716:2:2575
+717:0:3997
+718:2:2748
+719:0:3997
+720:2:2575
+721:0:3997
+722:2:2751
+723:2:2752
+724:2:2756
+725:2:2757
+726:2:2765
+727:2:2766
+728:2:2770
+729:2:2771
+730:2:2779
+731:2:2784
+732:2:2788
+733:2:2789
+734:2:2797
+735:2:2798
+736:2:2802
+737:2:2803
+738:2:2797
+739:2:2798
+740:2:2802
+741:2:2803
+742:2:2811
+743:2:2816
+744:2:2823
+745:2:2824
+746:2:2831
+747:2:2836
+748:2:2843
+749:2:2844
+750:2:2843
+751:2:2844
+752:2:2851
+753:2:2860
+754:0:3997
+755:2:2575
+756:0:3997
+757:2:2864
+758:2:2865
+759:2:2866
+760:2:2878
+761:2:2879
+762:2:2883
+763:2:2884
+764:2:2892
+765:2:2897
+766:2:2901
+767:2:2902
+768:2:2910
+769:2:2911
+770:2:2915
+771:2:2916
+772:2:2910
+773:2:2911
+774:2:2915
+775:2:2916
+776:2:2924
+777:2:2929
+778:2:2936
+779:2:2937
+780:2:2944
+781:2:2949
+782:2:2956
+783:2:2957
+784:2:2956
+785:2:2957
+786:2:2964
+787:2:2977
+788:2:2978
+789:0:3997
+790:2:2575
+791:0:3997
+792:2:3091
+793:2:3092
+794:2:3096
+795:2:3097
+796:2:3105
+797:2:3106
+798:2:3110
+799:2:3111
+800:2:3119
+801:2:3124
+802:2:3128
+803:2:3129
+804:2:3137
+805:2:3138
+806:2:3142
+807:2:3143
+808:2:3137
+809:2:3138
+810:2:3142
+811:2:3143
+812:2:3151
+813:2:3156
+814:2:3163
+815:2:3164
+816:2:3171
+817:2:3176
+818:2:3183
+819:2:3184
+820:2:3183
+821:2:3184
+822:2:3191
+823:0:3997
+824:2:2575
+825:0:3997
+826:2:3202
+827:2:3203
+828:2:3207
+829:2:3208
+830:2:3216
+831:2:3217
+832:2:3221
+833:2:3222
+834:2:3230
+835:2:3235
+836:2:3239
+837:2:3240
+838:2:3248
+839:2:3249
+840:2:3253
+841:2:3254
+842:2:3248
+843:2:3249
+844:2:3253
+845:2:3254
+846:2:3262
+847:2:3267
+848:2:3274
+849:2:3275
+850:2:3282
+851:2:3287
+852:2:3294
+853:2:3295
+854:2:3294
+855:2:3295
+856:2:3302
+857:2:3311
+858:0:3997
+859:2:2575
+860:0:3997
+861:2:3315
+862:2:3316
+863:2:3317
+864:2:3329
+865:2:3330
+866:2:3334
+867:2:3335
+868:2:3343
+869:2:3348
+870:2:3352
+871:2:3353
+872:2:3361
+873:2:3362
+874:2:3366
+875:2:3367
+876:2:3361
+877:2:3362
+878:2:3366
+879:2:3367
+880:2:3375
+881:2:3380
+882:2:3387
+883:2:3388
+884:2:3395
+885:2:3400
+886:2:3407
+887:2:3408
+888:2:3407
+889:2:3408
+890:2:3415
+891:2:3427
+892:2:3428
+893:0:3997
+894:2:2575
+895:0:3997
+896:2:3541
+897:0:3997
+898:2:3770
+899:2:3771
+900:2:3775
+901:2:3779
+902:2:3780
+903:2:3784
+904:2:3792
+905:2:3793
+906:2:3797
+907:2:3801
+908:2:3802
+909:2:3797
+910:2:3801
+911:2:3802
+912:2:3806
+913:2:3813
+914:2:3820
+915:2:3821
+916:2:3828
+917:2:3833
+918:2:3840
+919:2:3841
+920:2:3840
+921:2:3841
+922:2:3848
+923:2:3852
+924:0:3997
+925:2:3857
+926:0:3997
+927:2:3858
+928:0:3997
+929:2:3859
+930:0:3997
+931:2:3860
+932:0:3997
+933:1:53
+934:0:3997
+935:2:3861
+936:0:3997
+937:1:57
+938:1:58
+939:1:62
+940:1:66
+941:1:67
+942:1:71
+943:1:79
+944:1:80
+945:1:84
+946:1:88
+947:1:89
+948:1:84
+949:1:88
+950:1:89
+951:1:93
+952:1:100
+953:1:107
+954:1:108
+955:1:115
+956:1:120
+957:1:127
+958:1:128
+959:1:127
+960:1:128
+961:1:135
+962:1:139
+963:0:3997
+964:2:3860
+965:0:3997
+966:1:144
+967:0:3997
+968:2:3861
+969:0:3997
+970:2:3862
+971:0:3997
+972:2:3867
+973:0:3997
+974:2:3868
+975:0:3997
+976:2:3876
+977:2:3877
+978:2:3881
+979:2:3885
+980:2:3886
+981:2:3890
+982:2:3898
+983:2:3899
+984:2:3903
+985:2:3907
+986:2:3908
+987:2:3903
+988:2:3907
+989:2:3908
+990:2:3912
+991:2:3919
+992:2:3926
+993:2:3927
+994:2:3934
+995:2:3939
+996:2:3946
+997:2:3947
+998:2:3946
+999:2:3947
+1000:2:3954
+1001:2:3958
+1002:0:3997
+1003:2:3543
+1004:2:3553
+1005:0:3997
+1006:2:2575
+1007:0:3997
+1008:2:3544
+1009:2:3545
+1010:0:3997
+1011:2:2575
+1012:0:3997
+1013:2:3549
+1014:0:3997
+1015:2:3557
+1016:0:3997
+1017:2:2572
+1018:0:3997
+1019:2:2573
+1020:0:3997
+1021:2:2574
+1022:0:3997
+1023:2:2575
+1024:0:3997
+1025:2:2576
+1026:2:2577
+1027:2:2581
+1028:2:2582
+1029:2:2590
+1030:2:2591
+1031:2:2595
+1032:2:2596
+1033:2:2604
+1034:2:2609
+1035:2:2613
+1036:2:2614
+1037:2:2622
+1038:2:2623
+1039:2:2627
+1040:2:2628
+1041:2:2622
+1042:2:2623
+1043:2:2624
+1044:2:2636
+1045:2:2641
+1046:2:2648
+1047:2:2649
+1048:2:2656
+1049:2:2661
+1050:2:2668
+1051:2:2669
+1052:2:2668
+1053:2:2669
+1054:2:2676
+1055:2:2686
+1056:0:3997
+1057:2:2575
+1058:0:3997
+1059:2:2690
+1060:2:2694
+1061:2:2695
+1062:2:2699
+1063:2:2703
+1064:2:2704
+1065:2:2708
+1066:2:2716
+1067:2:2717
+1068:2:2721
+1069:2:2725
+1070:2:2726
+1071:2:2721
+1072:2:2722
+1073:2:2730
+1074:0:3997
+1075:2:2575
+1076:0:3997
+1077:2:2738
+1078:2:2739
+1079:2:2740
+1080:0:3997
+1081:2:2575
+1082:0:3997
+1083:2:2745
+1084:0:3997
+1085:2:3572
+1086:2:3573
+1087:2:3577
+1088:2:3581
+1089:2:3582
+1090:2:3586
+1091:2:3591
+1092:2:3599
+1093:2:3603
+1094:2:3604
+1095:2:3599
+1096:2:3603
+1097:2:3604
+1098:2:3608
+1099:2:3615
+1100:2:3622
+1101:2:3623
+1102:2:3630
+1103:2:3635
+1104:2:3642
+1105:2:3643
+1106:2:3642
+1107:2:3643
+1108:2:3650
+1109:2:3654
+1110:0:3997
+1111:2:3659
+1112:0:3997
+1113:2:3660
+1114:0:3997
+1115:2:3661
+1116:0:3997
+1117:2:3662
+1118:0:3997
+1119:1:53
+1120:0:3997
+1121:2:3663
+1122:0:3997
+1123:1:57
+1124:1:58
+1125:1:62
+1126:1:66
+1127:1:67
+1128:1:71
+1129:1:79
+1130:1:80
+1131:1:84
+1132:1:88
+1133:1:89
+1134:1:84
+1135:1:88
+1136:1:89
+1137:1:93
+1138:1:100
+1139:1:107
+1140:1:108
+1141:1:115
+1142:1:120
+1143:1:127
+1144:1:128
+1145:1:127
+1146:1:128
+1147:1:135
+1148:1:139
+1149:0:3997
+1150:2:3662
+1151:0:3997
+1152:1:144
+1153:0:3997
+1154:2:3663
+1155:0:3997
+1156:2:3664
+1157:0:3997
+1158:2:3669
+1159:0:3997
+1160:2:3670
+1161:0:3997
+1162:2:3678
+1163:2:3679
+1164:2:3683
+1165:2:3687
+1166:2:3688
+1167:2:3692
+1168:2:3700
+1169:2:3701
+1170:2:3705
+1171:2:3709
+1172:2:3710
+1173:2:3705
+1174:2:3709
+1175:2:3710
+1176:2:3714
+1177:2:3721
+1178:2:3728
+1179:2:3729
+1180:2:3736
+1181:2:3741
+1182:2:3748
+1183:2:3749
+1184:2:3748
+1185:2:3749
+1186:2:3756
+1187:2:3760
+1188:0:3997
+1189:2:2747
+1190:2:3553
+1191:0:3997
+1192:2:2575
+1193:0:3997
+1194:2:2748
+1195:0:3997
+1196:2:2575
+1197:0:3997
+1198:2:2751
+1199:2:2752
+1200:2:2756
+1201:2:2757
+1202:2:2765
+1203:2:2766
+1204:2:2770
+1205:2:2771
+1206:2:2779
+1207:2:2784
+1208:2:2788
+1209:2:2789
+1210:2:2797
+1211:2:2798
+1212:2:2802
+1213:2:2803
+1214:2:2797
+1215:2:2798
+1216:2:2802
+1217:2:2803
+1218:2:2811
+1219:2:2816
+1220:2:2823
+1221:2:2824
+1222:2:2831
+1223:2:2836
+1224:2:2843
+1225:2:2844
+1226:2:2843
+1227:2:2844
+1228:2:2851
+1229:2:2860
+1230:0:3997
+1231:2:2575
+1232:0:3997
+1233:2:2864
+1234:2:2865
+1235:2:2866
+1236:2:2878
+1237:2:2879
+1238:2:2883
+1239:2:2884
+1240:2:2892
+1241:2:2897
+1242:2:2901
+1243:2:2902
+1244:2:2910
+1245:2:2911
+1246:2:2915
+1247:2:2916
+1248:2:2910
+1249:2:2911
+1250:2:2915
+1251:2:2916
+1252:2:2924
+1253:2:2929
+1254:2:2936
+1255:2:2937
+1256:2:2944
+1257:2:2949
+1258:2:2956
+1259:2:2957
+1260:2:2956
+1261:2:2957
+1262:2:2964
+1263:2:2977
+1264:2:2978
+1265:0:3997
+1266:2:2575
+1267:0:3997
+1268:2:3091
+1269:2:3092
+1270:2:3096
+1271:2:3097
+1272:2:3105
+1273:2:3106
+1274:2:3110
+1275:2:3111
+1276:2:3119
+1277:2:3124
+1278:2:3128
+1279:2:3129
+1280:2:3137
+1281:2:3138
+1282:2:3142
+1283:2:3143
+1284:2:3137
+1285:2:3138
+1286:2:3142
+1287:2:3143
+1288:2:3151
+1289:2:3156
+1290:2:3163
+1291:2:3164
+1292:2:3171
+1293:2:3176
+1294:2:3183
+1295:2:3184
+1296:2:3183
+1297:2:3184
+1298:2:3191
+1299:0:3997
+1300:2:2575
+1301:0:3997
+1302:2:3202
+1303:2:3203
+1304:2:3207
+1305:2:3208
+1306:2:3216
+1307:2:3217
+1308:2:3221
+1309:2:3222
+1310:2:3230
+1311:2:3235
+1312:2:3239
+1313:2:3240
+1314:2:3248
+1315:2:3249
+1316:2:3253
+1317:2:3254
+1318:2:3248
+1319:2:3249
+1320:2:3253
+1321:2:3254
+1322:2:3262
+1323:2:3267
+1324:2:3274
+1325:2:3275
+1326:2:3282
+1327:2:3287
+1328:2:3294
+1329:2:3295
+1330:2:3294
+1331:2:3295
+1332:2:3302
+1333:2:3311
+1334:0:3997
+1335:2:2575
+1336:0:3997
+1337:1:145
+1338:0:3997
+1339:1:147
+1340:0:3997
+1341:1:46
+1342:0:3997
+1343:1:153
+1344:1:154
+1345:1:158
+1346:1:159
+1347:1:167
+1348:1:168
+1349:1:172
+1350:1:173
+1351:1:181
+1352:1:186
+1353:1:190
+1354:1:191
+1355:1:199
+1356:1:200
+1357:1:204
+1358:1:205
+1359:1:199
+1360:1:200
+1361:1:204
+1362:1:205
+1363:1:213
+1364:1:218
+1365:1:225
+1366:1:226
+1367:1:233
+1368:1:238
+1369:1:245
+1370:1:246
+1371:1:245
+1372:1:246
+1373:1:253
+1374:0:3997
+1375:1:42
+1376:0:3997
+1377:1:43
+1378:0:3997
+1379:1:44
+1380:0:3997
+1381:1:145
+1382:0:3997
+1383:1:147
+1384:0:3997
+1385:1:46
+1386:0:3997
+1387:1:264
+1388:1:265
+1389:0:3997
+1390:1:42
+1391:0:3997
+1392:1:43
+1393:0:3997
+1394:1:44
+1395:0:3997
+1396:1:145
+1397:0:3997
+1398:1:147
+1399:0:3997
+1400:1:46
+1401:0:3997
+1402:1:271
+1403:1:272
+1404:1:276
+1405:1:277
+1406:1:285
+1407:1:286
+1408:1:290
+1409:1:291
+1410:1:299
+1411:1:304
+1412:1:308
+1413:1:309
+1414:1:317
+1415:1:318
+1416:1:322
+1417:1:323
+1418:1:317
+1419:1:318
+1420:1:322
+1421:1:323
+1422:1:331
+1423:1:336
+1424:1:343
+1425:1:344
+1426:1:351
+1427:1:356
+1428:1:363
+1429:1:364
+1430:1:363
+1431:1:364
+1432:1:371
+1433:0:3997
+1434:1:42
+1435:0:3997
+1436:1:43
+1437:0:3997
+1438:1:44
+1439:0:3997
+1440:1:145
+1441:0:3997
+1442:1:147
+1443:0:3997
+1444:1:46
+1445:0:3997
+1446:1:382
+1447:1:383
+1448:1:387
+1449:1:388
+1450:1:396
+1451:1:397
+1452:1:401
+1453:1:402
+1454:1:410
+1455:1:415
+1456:1:419
+1457:1:420
+1458:1:428
+1459:1:429
+1460:1:433
+1461:1:434
+1462:1:428
+1463:1:429
+1464:1:433
+1465:1:434
+1466:1:442
+1467:1:447
+1468:1:454
+1469:1:455
+1470:1:462
+1471:1:467
+1472:1:474
+1473:1:475
+1474:1:474
+1475:1:475
+1476:1:482
+1477:1:491
+1478:0:3997
+1479:1:42
+1480:0:3997
+1481:1:43
+1482:0:3997
+1483:1:44
+1484:0:3997
+1485:1:145
+1486:0:3997
+1487:1:147
+1488:0:3995
+1489:1:46
+1490:0:4001
+1491:1:1067
+1492:1:1068
+1493:1:1072
+1494:1:1073
+1495:1:1081
+1496:1:1082
+1497:1:1083
+1498:1:1095
+1499:1:1100
+1500:1:1104
+1501:1:1105
+1502:1:1113
+1503:1:1114
+1504:1:1118
+1505:1:1119
+1506:1:1113
+1507:1:1114
+1508:1:1118
+1509:1:1119
+1510:1:1127
+1511:1:1132
+1512:1:1139
+1513:1:1140
+1514:1:1147
+1515:1:1152
+1516:1:1159
+1517:1:1160
+1518:1:1159
+1519:1:1160
+1520:1:1167
+1521:0:4001
+1522:1:42
+1523:0:4001
+1524:1:43
+1525:0:4001
+1526:2:3315
+1527:2:3316
+1528:2:3317
+1529:2:3329
+1530:2:3330
+1531:2:3334
+1532:2:3335
+1533:2:3343
+1534:2:3348
+1535:2:3352
+1536:2:3353
+1537:2:3361
+1538:2:3362
+1539:2:3366
+1540:2:3367
+1541:2:3361
+1542:2:3362
+1543:2:3366
+1544:2:3367
+1545:2:3375
+1546:2:3380
+1547:2:3387
+1548:2:3388
+1549:2:3395
+1550:2:3400
+1551:2:3407
+1552:2:3408
+1553:2:3407
+1554:2:3408
+1555:2:3415
+1556:2:3425
+1557:0:4001
+1558:2:2575
+-1:-1:-1
+1559:0:4001
+1560:2:3431
+1561:2:3432
+1562:2:3436
+1563:2:3437
+1564:2:3445
+1565:2:3446
+1566:2:3450
+1567:2:3451
+1568:2:3459
+1569:2:3464
+1570:2:3468
+1571:2:3469
+1572:2:3477
+1573:2:3478
+1574:2:3482
+1575:2:3483
+1576:2:3477
+1577:2:3478
+1578:2:3482
+1579:2:3483
+1580:2:3491
+1581:2:3496
+1582:2:3503
+1583:2:3504
+1584:2:3511
+1585:2:3516
+1586:2:3523
+1587:2:3524
+1588:2:3523
+1589:2:3524
+1590:2:3531
+1591:0:4001
+1592:2:2575
+1593:0:4001
+1594:2:3315
+1595:2:3316
+1596:2:3320
+1597:2:3321
+1598:2:3329
+1599:2:3330
+1600:2:3334
+1601:2:3335
+1602:2:3343
+1603:2:3348
+1604:2:3352
+1605:2:3353
+1606:2:3361
+1607:2:3362
+1608:2:3366
+1609:2:3367
+1610:2:3361
+1611:2:3362
+1612:2:3366
+1613:2:3367
+1614:2:3375
+1615:2:3380
+1616:2:3387
+1617:2:3388
+1618:2:3395
+1619:2:3400
+1620:2:3407
+1621:2:3408
+1622:2:3407
+1623:2:3408
+1624:2:3415
+1625:2:3425
+1626:0:4001
+1627:2:2575
+1628:0:4001
+1629:2:3431
+1630:2:3432
+1631:2:3436
+1632:2:3437
+1633:2:3445
+1634:2:3446
+1635:2:3450
+1636:2:3451
+1637:2:3459
+1638:2:3464
+1639:2:3468
+1640:2:3469
+1641:2:3477
+1642:2:3478
+1643:2:3482
+1644:2:3483
+1645:2:3477
+1646:2:3478
+1647:2:3482
+1648:2:3483
+1649:2:3491
+1650:2:3496
+1651:2:3503
+1652:2:3504
+1653:2:3511
+1654:2:3516
+1655:2:3523
+1656:2:3524
+1657:2:3523
+1658:2:3524
+1659:2:3531
+1660:0:4001
+1661:2:2575
+1662:0:4001
+1663:2:3315
+1664:2:3316
+1665:2:3320
+1666:2:3321
+1667:2:3329
+1668:2:3330
+1669:2:3334
+1670:2:3335
+1671:2:3343
+1672:2:3348
+1673:2:3352
+1674:2:3353
+1675:2:3361
+1676:2:3362
+1677:2:3366
+1678:2:3367
+1679:2:3361
+1680:2:3362
+1681:2:3366
+1682:2:3367
+1683:2:3375
+1684:2:3380
+1685:2:3387
+1686:2:3388
+1687:2:3395
+1688:2:3400
+1689:2:3407
+1690:2:3408
+1691:2:3407
+1692:2:3408
+1693:2:3415
+1694:2:3425
+1695:0:4001
+1696:2:2575
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/DEFINES b/formal-model/urcu-controldataflow-intel-ipi-compress/DEFINES
new file mode 100644 (file)
index 0000000..abea5ff
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
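+// read_poison is true when either of the reader's two dereferences returned a
+// POISON value, i.e. the reader observed memory that had already been freed.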
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/Makefile b/formal-model/urcu-controldataflow-intel-ipi-compress/Makefile
new file mode 100644 (file)
index 0000000..f8bfd31
--- /dev/null
@@ -0,0 +1,171 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64 -DCOLLAPSE
+#CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       #make urcu_free | tee urcu_free.log
+       #make urcu_free_no_mb | tee urcu_free_no_mb.log
+       #make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       #make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       #make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       #make asserts | tee asserts.log
+       make summary
+
+#show trail : spin -v -t -N pan.ltl input.spin
+# after each individual make.
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
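+# pan options used by the run targets above (see the pan documentation for
+# details): -a searches for acceptance cycles (liveness), -f adds weak
+# fairness, -v is verbose, -c1 stops at the first error, -m bounds the search
+# depth and -w sets the hash table size (2^N slots).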
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/references.txt b/formal-model/urcu-controldataflow-intel-ipi-compress/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu.sh b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w21
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu.spin b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu.spin
new file mode 100644 (file)
index 0000000..8075506
--- /dev/null
@@ -0,0 +1,1321 @@
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
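+/*
+ * Illustration of the token scheme (a sketch only; "flow", INSN_A_DONE and
+ * INSN_B_DONE are hypothetical names, not part of the model below). A
+ * two-instruction RAW chain becomes two guarded options: B may only fire once
+ * A has produced its token, and each option inhibits itself by producing the
+ * token it lists in its own "notbits":
+ *
+ *     :: CONSUME_TOKENS(flow, 0, INSN_A_DONE) ->
+ *             tmp = READ_CACHED_VAR(rcu_ptr);
+ *             PRODUCE_TOKENS(flow, INSN_A_DONE);
+ *     :: CONSUME_TOKENS(flow, INSN_A_DONE, INSN_B_DONE) ->
+ *             tmp2 = READ_CACHED_VAR(rcu_data[tmp]);
+ *             PRODUCE_TOKENS(flow, INSN_B_DONE);
+ */
+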
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * normally removes the need for this dependency, but the dependency can be
+ * required when writing multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
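+/*
+ * Quick illustration of the data dependency kinds above (plain pseudo-code;
+ * the variables a, b and x exist only for this example):
+ *
+ *   a = x;  b = a;   RAW : b must observe the value just written to a
+ *   b = a;  a = 1;   WAR : the write to a must not overtake the earlier read
+ *   a = 1;  a = 2;   WAW : the second write must be the one left in memory
+ */
+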
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
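+/*
+ * Example of how the cache model composes with the barriers (sketch; uses the
+ * real rcu_ptr variable, but the surrounding process and value are only
+ * illustrative):
+ *
+ *     WRITE_CACHED_VAR(rcu_ptr, 1);   write lands in cached_rcu_ptr, dirty bit set
+ *     ooo_mem(i);                     may or may not flush it to mem_rcu_ptr
+ *     smp_wmb(i);                     guarantees the write-back to mem_rcu_ptr;
+ *                                     the peer observes it at its next cache
+ *                                     refresh (smp_rmb or a random update)
+ */
+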
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
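+/*
+ * Models the out-of-order memory subsystem between any two instructions:
+ * randomly writes back dirty cached variables to memory and, on architectures
+ * with out-of-order cache bank reads (Alpha), also randomly refreshes clean
+ * cache entries from memory; otherwise reads are refreshed with smp_rmb.
+ */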
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
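+/*
+ * PROCEDURE_READ_LOCK above corresponds roughly to the following read-lock
+ * pseudo-code (a sketch for orientation, not literal urcu source):
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;    outermost lock
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;        nested lock
+ */
+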
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
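+       /*
+        * When the model runs without memory barriers (NO_MB) or with
+        * signal-based barriers (REMOTE_BARRIERS), all *_MB tokens are produced
+        * up front, so the corresponding barrier steps below are never taken.
+        */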
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the barrier
+                        * because skipping it would require a branch, whose performance impact in the
+                        * common case is not justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, if the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
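+/*
+ * Rough sketch of how the token bits above drive the writer steps below:
+ * each statement is guarded by CONSUME_TOKENS(proc_urcu_writer, deps, tok),
+ * which only lets the step execute once all "deps" tokens have been produced
+ * and "tok" has not yet been produced. The step then performs its
+ * side-effects and marks itself done with PRODUCE_TOKENS(proc_urcu_writer,
+ * tok). CLEAR_TOKENS() removes tokens so that the busy-waiting steps (the
+ * *_WAIT_LOOP tokens) can be replayed. This models out-of-order execution
+ * of the update-side instructions while preserving their data and control
+ * dependencies.
+ */
+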
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we do not add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so that, under weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free.log b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free.log
new file mode 100644 (file)
index 0000000..43e9bba
--- /dev/null
@@ -0,0 +1,55 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1294)
+Depth=    9223 States=    1e+06 Transitions= 6.87e+06 Memory=   516.350        t=   19.4 R=   5e+04
+Depth=    9223 States=    2e+06 Transitions= 1.47e+07 Memory=   563.713        t=   43.1 R=   5e+04
+Depth=    9223 States=    3e+06 Transitions= 2.46e+07 Memory=   613.127        t=   73.9 R=   4e+04
+pan: resizing hashtable to -w22..  done
+Depth=    9223 States=    4e+06 Transitions= 3.19e+07 Memory=   690.440        t=   95.5 R=   4e+04
+Depth=    9223 States=    5e+06 Transitions= 3.95e+07 Memory=   736.533        t=    118 R=   4e+04
+Depth=    9223 States=    6e+06 Transitions= 5.71e+07 Memory=   785.068        t=    174 R=   3e+04
+Depth=    9223 States=    7e+06 Transitions= 6.81e+07 Memory=   834.580        t=    209 R=   3e+04
+Depth=    9223 States=    8e+06 Transitions= 8.22e+07 Memory=   883.311        t=    254 R=   3e+04
+Depth=    9223 States=    9e+06 Transitions= 9.54e+07 Memory=   932.139        t=    296 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    9223 States=    1e+07 Transitions= 1.08e+08 Memory=  1104.670        t=    338 R=   3e+04
+Depth=    9223 States=  1.1e+07 Transitions= 1.21e+08 Memory=  1155.451        t=    375 R=   3e+04
+Depth=    9223 States=  1.2e+07 Transitions=  1.3e+08 Memory=  1205.744        t=    403 R=   3e+04
+Depth=    9223 States=  1.3e+07 Transitions= 1.42e+08 Memory=  1254.572        t=    442 R=   3e+04
+Depth=    9223 States=  1.4e+07 Transitions= 1.72e+08 Memory=  1302.717        t=    539 R=   3e+04
+Depth=    9223 States=  1.5e+07 Transitions= 1.91e+08 Memory=  1354.768        t=    600 R=   3e+04
+Depth=    9223 States=  1.6e+07 Transitions= 2.08e+08 Memory=  1405.842        t=    653 R=   2e+04
+Depth=    9223 States=  1.7e+07 Transitions=  2.2e+08 Memory=  1456.818        t=    691 R=   2e+04
+Depth=    9223 States=  1.8e+07 Transitions= 2.39e+08 Memory=  1506.135        t=    751 R=   2e+04
+Depth=    9223 States=  1.9e+07 Transitions= 2.55e+08 Memory=  1556.330        t=    801 R=   2e+04
+Depth=    9223 States=    2e+07 Transitions= 2.72e+08 Memory=  1604.084        t=    856 R=   2e+04
+Depth=    9285 States=  2.1e+07 Transitions= 2.85e+08 Memory=  1650.080        t=    898 R=   2e+04
+Depth=    9324 States=  2.2e+07 Transitions= 2.99e+08 Memory=  1696.760        t=    941 R=   2e+04
+Depth=    9324 States=  2.3e+07 Transitions=  3.1e+08 Memory=  1746.369        t=    976 R=   2e+04
+Depth=    9324 States=  2.4e+07 Transitions= 3.21e+08 Memory=  1792.561        t= 1.01e+03 R=   2e+04
+Depth=    9324 States=  2.5e+07 Transitions= 3.34e+08 Memory=  1841.096        t= 1.05e+03 R=   2e+04
+Depth=    9324 States=  2.6e+07 Transitions= 3.45e+08 Memory=  1890.998        t= 1.09e+03 R=   2e+04
+Depth=    9324 States=  2.7e+07 Transitions= 3.59e+08 Memory=  1940.412        t= 1.13e+03 R=   2e+04
+Depth=    9324 States=  2.8e+07 Transitions= 3.71e+08 Memory=  1987.776        t= 1.17e+03 R=   2e+04
+Depth=    9324 States=  2.9e+07 Transitions= 3.84e+08 Memory=  2034.846        t= 1.21e+03 R=   2e+04
+Depth=    9324 States=    3e+07 Transitions= 3.96e+08 Memory=  2081.233        t= 1.25e+03 R=   2e+04
+Depth=    9324 States=  3.1e+07 Transitions= 4.09e+08 Memory=  2129.865        t= 1.29e+03 R=   2e+04
+Depth=    9324 States=  3.2e+07 Transitions= 4.19e+08 Memory=  2179.670        t= 1.32e+03 R=   2e+04
+Depth=    9324 States=  3.3e+07 Transitions=  4.3e+08 Memory=  2227.717        t= 1.36e+03 R=   2e+04
+Depth=    9324 States=  3.4e+07 Transitions= 4.44e+08 Memory=  2277.033        t= 1.4e+03 R=   2e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9324 States=  3.5e+07 Transitions=  4.6e+08 Memory=  2824.190        t= 1.46e+03 R=   2e+04
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free.ltl b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_nested.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress.ltl b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.log b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.log
new file mode 100644 (file)
index 0000000..d087e70
--- /dev/null
@@ -0,0 +1,703 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_reader.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+depth 7: Claim reached state 9 (line 1367)
+depth 131: Claim reached state 9 (line 1366)
+Depth=  108226 States=    1e+06 Transitions=  1.1e+07 Memory=   495.354        t=   32.5 R=   3e+04
+Depth=  108226 States=    2e+06 Transitions=  2.2e+07 Memory=   526.018        t=   65.8 R=   3e+04
+Depth=  108226 States=    3e+06 Transitions= 3.31e+07 Memory=   555.510        t=   99.5 R=   3e+04
+pan: resizing hashtable to -w22..  done
+Depth=  108226 States=    4e+06 Transitions= 4.38e+07 Memory=   615.733        t=    132 R=   3e+04
+Depth=  108226 States=    5e+06 Transitions= 5.47e+07 Memory=   644.346        t=    164 R=   3e+04
+Depth=  108226 States=    6e+06 Transitions= 6.54e+07 Memory=   671.983        t=    197 R=   3e+04
+Depth=  108226 States=    7e+06 Transitions= 7.64e+07 Memory=   700.108        t=    230 R=   3e+04
+Depth=  110273 States=    8e+06 Transitions= 8.89e+07 Memory=   728.428        t=    268 R=   3e+04
+Depth=  110273 States=    9e+06 Transitions= 1.02e+08 Memory=   757.920        t=    310 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=  110273 States=    1e+07 Transitions= 1.14e+08 Memory=   911.408        t=    347 R=   3e+04
+Depth=  110273 States=  1.1e+07 Transitions= 1.26e+08 Memory=   939.924        t=    380 R=   3e+04
+Depth=  110273 States=  1.2e+07 Transitions= 1.37e+08 Memory=   968.440        t=    414 R=   3e+04
+Depth=  110273 States=  1.3e+07 Transitions= 1.58e+08 Memory=   996.272        t=    479 R=   3e+04
+Depth=  110273 States=  1.4e+07 Transitions= 1.69e+08 Memory=  1026.545        t=    513 R=   3e+04
+Depth=  110273 States=  1.5e+07 Transitions= 1.81e+08 Memory=  1055.451        t=    550 R=   3e+04
+Depth=  110273 States=  1.6e+07 Transitions= 1.92e+08 Memory=  1084.943        t=    583 R=   3e+04
+Depth=  110273 States=  1.7e+07 Transitions= 2.03e+08 Memory=  1114.045        t=    615 R=   3e+04
+Depth=  110273 States=  1.8e+07 Transitions= 2.14e+08 Memory=  1144.318        t=    647 R=   3e+04
+Depth=  110273 States=  1.9e+07 Transitions= 2.25e+08 Memory=  1173.420        t=    680 R=   3e+04
+Depth=  110273 States=    2e+07 Transitions= 2.36e+08 Memory=  1202.717        t=    713 R=   3e+04
+Depth=  110273 States=  2.1e+07 Transitions= 2.47e+08 Memory=  1232.404        t=    746 R=   3e+04
+Depth=  110273 States=  2.2e+07 Transitions= 2.58e+08 Memory=  1262.092        t=    778 R=   3e+04
+Depth=  110273 States=  2.3e+07 Transitions= 2.69e+08 Memory=  1290.119        t=    811 R=   3e+04
+Depth=  110273 States=  2.4e+07 Transitions= 2.82e+08 Memory=  1317.268        t=    851 R=   3e+04
+Depth=  110273 States=  2.5e+07 Transitions= 2.96e+08 Memory=  1345.783        t=    893 R=   3e+04
+Depth=  110273 States=  2.6e+07 Transitions= 3.13e+08 Memory=  1373.615        t=    948 R=   3e+04
+Depth=  110273 States=  2.7e+07 Transitions= 3.48e+08 Memory=  1403.889        t= 1.06e+03 R=   3e+04
+Depth=  110273 States=  2.8e+07 Transitions=  3.7e+08 Memory=  1434.651        t= 1.13e+03 R=   2e+04
+Depth=  110273 States=  2.9e+07 Transitions= 3.84e+08 Memory=  1464.338        t= 1.17e+03 R=   2e+04
+Depth=  110273 States=    3e+07 Transitions= 4.04e+08 Memory=  1492.365        t= 1.24e+03 R=   2e+04
+Depth=  110273 States=  3.1e+07 Transitions= 4.26e+08 Memory=  1520.490        t= 1.31e+03 R=   2e+04
+Depth=  110273 States=  3.2e+07 Transitions= 4.42e+08 Memory=  1550.080        t= 1.36e+03 R=   2e+04
+Depth=  110273 States=  3.3e+07 Transitions= 4.63e+08 Memory=  1578.791        t= 1.42e+03 R=   2e+04
+Depth=  110273 States=  3.4e+07 Transitions= 4.87e+08 Memory=  1605.842        t= 1.5e+03 R=   2e+04
+pan: resizing hashtable to -w26..  done
+Depth=  110273 States=  3.5e+07 Transitions= 5.01e+08 Memory=  2130.147        t= 1.55e+03 R=   2e+04
+Depth=  110273 States=  3.6e+07 Transitions= 5.22e+08 Memory=  2158.467        t= 1.61e+03 R=   2e+04
+Depth=  110273 States=  3.7e+07 Transitions=  5.4e+08 Memory=  2186.006        t= 1.67e+03 R=   2e+04
+Depth=  110273 States=  3.8e+07 Transitions= 5.61e+08 Memory=  2213.545        t= 1.74e+03 R=   2e+04
+Depth=  110273 States=  3.9e+07 Transitions= 5.82e+08 Memory=  2242.061        t= 1.82e+03 R=   2e+04
+Depth=  110273 States=    4e+07 Transitions=    6e+08 Memory=  2270.088        t= 1.87e+03 R=   2e+04
+Depth=  110273 States=  4.1e+07 Transitions= 6.18e+08 Memory=  2298.408        t= 1.93e+03 R=   2e+04
+Depth=  110273 States=  4.2e+07 Transitions= 6.37e+08 Memory=  2325.850        t= 1.99e+03 R=   2e+04
+Depth=  110273 States=  4.3e+07 Transitions= 6.65e+08 Memory=  2352.803        t= 2.08e+03 R=   2e+04
+Depth=  110273 States=  4.4e+07 Transitions= 6.82e+08 Memory=  2380.733        t= 2.13e+03 R=   2e+04
+Depth=  110273 States=  4.5e+07 Transitions= 6.99e+08 Memory=  2408.076        t= 2.18e+03 R=   2e+04
+Depth=  110273 States=  4.6e+07 Transitions= 7.12e+08 Memory=  2434.346        t= 2.23e+03 R=   2e+04
+Depth=  110273 States=  4.7e+07 Transitions= 7.26e+08 Memory=  2462.959        t= 2.27e+03 R=   2e+04
+Depth=  110273 States=  4.8e+07 Transitions=  7.4e+08 Memory=  2493.721        t= 2.31e+03 R=   2e+04
+Depth=  110273 States=  4.9e+07 Transitions= 7.53e+08 Memory=  2522.627        t= 2.35e+03 R=   2e+04
+Depth=  110273 States=    5e+07 Transitions= 7.66e+08 Memory=  2552.803        t= 2.39e+03 R=   2e+04
+Depth=  110273 States=  5.1e+07 Transitions= 7.77e+08 Memory=  2583.565        t= 2.42e+03 R=   2e+04
+Depth=  110273 States=  5.2e+07 Transitions=  7.9e+08 Memory=  2611.006        t= 2.46e+03 R=   2e+04
+Depth=  110273 States=  5.3e+07 Transitions= 8.16e+08 Memory=  2641.182        t= 2.54e+03 R=   2e+04
+Depth=  110273 States=  5.4e+07 Transitions= 8.52e+08 Memory=  2673.213        t= 2.66e+03 R=   2e+04
+Depth=  110273 States=  5.5e+07 Transitions= 8.93e+08 Memory=  2707.197        t= 2.79e+03 R=   2e+04
+Depth=  110273 States=  5.6e+07 Transitions=  9.4e+08 Memory=  2738.447        t= 2.94e+03 R=   2e+04
+Depth=  110273 States=  5.7e+07 Transitions= 9.81e+08 Memory=  2767.354        t= 3.08e+03 R=   2e+04
+Depth=  110273 States=  5.8e+07 Transitions= 1.01e+09 Memory=  2794.111        t= 3.16e+03 R=   2e+04
+Depth=  110273 States=  5.9e+07 Transitions= 1.02e+09 Memory=  2822.529        t= 3.21e+03 R=   2e+04
+Depth=  110273 States=    6e+07 Transitions= 1.05e+09 Memory=  2850.654        t= 3.28e+03 R=   2e+04
+Depth=  110273 States=  6.1e+07 Transitions= 1.08e+09 Memory=  2879.170        t= 3.39e+03 R=   2e+04
+Depth=  110273 States=  6.2e+07 Transitions=  1.1e+09 Memory=  2908.858        t= 3.44e+03 R=   2e+04
+Depth=  110273 States=  6.3e+07 Transitions= 1.11e+09 Memory=  2935.908        t= 3.49e+03 R=   2e+04
+Depth=  110273 States=  6.4e+07 Transitions= 1.13e+09 Memory=  2960.518        t= 3.54e+03 R=   2e+04
+Depth=  110273 States=  6.5e+07 Transitions= 1.14e+09 Memory=  2990.205        t= 3.58e+03 R=   2e+04
+Depth=  110273 States=  6.6e+07 Transitions= 1.17e+09 Memory=  3018.721        t= 3.67e+03 R=   2e+04
+Depth=  110273 States=  6.7e+07 Transitions= 1.18e+09 Memory=  3047.041        t= 3.71e+03 R=   2e+04
+Depth=  110273 States=  6.8e+07 Transitions= 1.21e+09 Memory=  3074.580        t= 3.79e+03 R=   2e+04
+Depth=  110273 States=  6.9e+07 Transitions= 1.23e+09 Memory=  3101.826        t= 3.87e+03 R=   2e+04
+Depth=  110273 States=    7e+07 Transitions= 1.27e+09 Memory=  3130.635        t= 3.98e+03 R=   2e+04
+Depth=  110273 States=  7.1e+07 Transitions= 1.29e+09 Memory=  3157.197        t= 4.06e+03 R=   2e+04
+Depth=  110273 States=  7.2e+07 Transitions= 1.32e+09 Memory=  3184.443        t= 4.16e+03 R=   2e+04
+Depth=  110273 States=  7.3e+07 Transitions= 1.34e+09 Memory=  3211.690        t= 4.24e+03 R=   2e+04
+Depth=  110273 States=  7.4e+07 Transitions= 1.37e+09 Memory=  3239.229        t= 4.32e+03 R=   2e+04
+Depth=  110273 States=  7.5e+07 Transitions= 1.38e+09 Memory=  3266.670        t= 4.38e+03 R=   2e+04
+Depth=  110273 States=  7.6e+07 Transitions= 1.41e+09 Memory=  3294.307        t= 4.46e+03 R=   2e+04
+Depth=  110273 States=  7.7e+07 Transitions= 1.44e+09 Memory=  3322.432        t= 4.55e+03 R=   2e+04
+Depth=  110273 States=  7.8e+07 Transitions= 1.46e+09 Memory=  3352.608        t= 4.62e+03 R=   2e+04
+Depth=  110273 States=  7.9e+07 Transitions= 1.47e+09 Memory=  3378.193        t= 4.66e+03 R=   2e+04
+Depth=  110273 States=    8e+07 Transitions= 1.49e+09 Memory=  3407.490        t= 4.71e+03 R=   2e+04
+Depth=  110273 States=  8.1e+07 Transitions= 1.52e+09 Memory=  3436.397        t= 4.8e+03 R=   2e+04
+Depth=  110273 States=  8.2e+07 Transitions= 1.53e+09 Memory=  3461.690        t= 4.84e+03 R=   2e+04
+Depth=  110273 States=  8.3e+07 Transitions= 1.56e+09 Memory=  3491.182        t= 4.92e+03 R=   2e+04
+Depth=  110273 States=  8.4e+07 Transitions= 1.57e+09 Memory=  3519.990        t= 4.98e+03 R=   2e+04
+Depth=  110273 States=  8.5e+07 Transitions= 1.59e+09 Memory=  3546.065        t= 5.02e+03 R=   2e+04
+Depth=  110273 States=  8.6e+07 Transitions=  1.6e+09 Memory=  3575.459        t= 5.07e+03 R=   2e+04
+Depth=  110273 States=  8.7e+07 Transitions= 1.63e+09 Memory=  3605.049        t= 5.14e+03 R=   2e+04
+Depth=  110273 States=  8.8e+07 Transitions= 1.64e+09 Memory=  3632.588        t= 5.2e+03 R=   2e+04
+Depth=  110273 States=  8.9e+07 Transitions= 1.66e+09 Memory=  3661.787        t= 5.26e+03 R=   2e+04
+Depth=  110273 States=    9e+07 Transitions= 1.68e+09 Memory=  3688.154        t= 5.3e+03 R=   2e+04
+Depth=  110273 States=  9.1e+07 Transitions=  1.7e+09 Memory=  3715.986        t= 5.36e+03 R=   2e+04
+Depth=  110273 States=  9.2e+07 Transitions= 1.71e+09 Memory=  3743.623        t= 5.41e+03 R=   2e+04
+Depth=  110273 States=  9.3e+07 Transitions= 1.73e+09 Memory=  3771.358        t= 5.46e+03 R=   2e+04
+Depth=  110273 States=  9.4e+07 Transitions= 1.75e+09 Memory=  3797.920        t= 5.53e+03 R=   2e+04
+Depth=  110273 States=  9.5e+07 Transitions= 1.77e+09 Memory=  3827.119        t= 5.6e+03 R=   2e+04
+Depth=  110273 States=  9.6e+07 Transitions= 1.79e+09 Memory=  3858.858        t= 5.64e+03 R=   2e+04
+Depth=  110273 States=  9.7e+07 Transitions=  1.8e+09 Memory=  3885.713        t= 5.7e+03 R=   2e+04
+Depth=  110273 States=  9.8e+07 Transitions= 1.82e+09 Memory=  3914.717        t= 5.75e+03 R=   2e+04
+Depth=  110273 States=  9.9e+07 Transitions= 1.84e+09 Memory=  3941.670        t= 5.81e+03 R=   2e+04
+Depth=  110273 States=    1e+08 Transitions= 1.86e+09 Memory=  3968.818        t= 5.87e+03 R=   2e+04
+Depth=  110273 States= 1.01e+08 Transitions= 1.88e+09 Memory=  3994.990        t= 5.94e+03 R=   2e+04
+Depth=  110273 States= 1.02e+08 Transitions=  1.9e+09 Memory=  4023.311        t= 6.01e+03 R=   2e+04
+Depth=  110273 States= 1.03e+08 Transitions= 1.92e+09 Memory=  4053.193        t= 6.08e+03 R=   2e+04
+Depth=  110273 States= 1.04e+08 Transitions= 1.94e+09 Memory=  4078.389        t= 6.12e+03 R=   2e+04
+Depth=  110273 States= 1.05e+08 Transitions= 1.96e+09 Memory=  4108.467        t= 6.2e+03 R=   2e+04
+Depth=  110273 States= 1.06e+08 Transitions= 1.98e+09 Memory=  4137.080        t= 6.25e+03 R=   2e+04
+Depth=  110273 States= 1.07e+08 Transitions= 1.99e+09 Memory=  4163.740        t= 6.29e+03 R=   2e+04
+Depth=  110273 States= 1.08e+08 Transitions= 2.01e+09 Memory=  4191.768        t= 6.35e+03 R=   2e+04
+Depth=  110273 States= 1.09e+08 Transitions= 2.03e+09 Memory=  4221.651        t= 6.4e+03 R=   2e+04
+Depth=  110273 States=  1.1e+08 Transitions= 2.04e+09 Memory=  4249.971        t= 6.45e+03 R=   2e+04
+Depth=  110273 States= 1.11e+08 Transitions= 2.07e+09 Memory=  4278.877        t= 6.52e+03 R=   2e+04
+Depth=  110273 States= 1.12e+08 Transitions= 2.09e+09 Memory=  4308.174        t= 6.59e+03 R=   2e+04
+Depth=  110273 States= 1.13e+08 Transitions=  2.1e+09 Memory=  4333.760        t= 6.64e+03 R=   2e+04
+Depth=  110273 States= 1.14e+08 Transitions= 2.13e+09 Memory=  4363.740        t= 6.71e+03 R=   2e+04
+Depth=  110273 States= 1.15e+08 Transitions= 2.14e+09 Memory=  4392.158        t= 6.76e+03 R=   2e+04
+Depth=  110273 States= 1.16e+08 Transitions= 2.16e+09 Memory=  4419.014        t= 6.81e+03 R=   2e+04
+Depth=  110273 States= 1.17e+08 Transitions= 2.17e+09 Memory=  4446.846        t= 6.86e+03 R=   2e+04
+Depth=  110273 States= 1.18e+08 Transitions= 2.19e+09 Memory=  4476.338        t= 6.91e+03 R=   2e+04
+Depth=  110273 States= 1.19e+08 Transitions= 2.21e+09 Memory=  4505.440        t= 6.96e+03 R=   2e+04
+Depth=  110273 States=  1.2e+08 Transitions= 2.22e+09 Memory=  4529.561        t= 7.01e+03 R=   2e+04
+Depth=  110273 States= 1.21e+08 Transitions= 2.24e+09 Memory=  4557.588        t= 7.07e+03 R=   2e+04
+Depth=  110273 States= 1.22e+08 Transitions= 2.26e+09 Memory=  4584.736        t= 7.12e+03 R=   2e+04
+Depth=  110273 States= 1.23e+08 Transitions= 2.27e+09 Memory=  4612.764        t= 7.18e+03 R=   2e+04
+Depth=  110273 States= 1.24e+08 Transitions= 2.29e+09 Memory=  4640.303        t= 7.22e+03 R=   2e+04
+Depth=  110273 States= 1.25e+08 Transitions= 2.32e+09 Memory=  4669.404        t= 7.32e+03 R=   2e+04
+Depth=  110273 States= 1.26e+08 Transitions= 2.35e+09 Memory=  4698.018        t= 7.44e+03 R=   2e+04
+Depth=  110273 States= 1.27e+08 Transitions= 2.37e+09 Memory=  4728.877        t= 7.49e+03 R=   2e+04
+Depth=  110273 States= 1.28e+08 Transitions= 2.38e+09 Memory=  4757.393        t= 7.54e+03 R=   2e+04
+Depth=  110273 States= 1.29e+08 Transitions=  2.4e+09 Memory=  4785.811        t= 7.6e+03 R=   2e+04
+Depth=  110273 States=  1.3e+08 Transitions= 2.42e+09 Memory=  4814.033        t= 7.66e+03 R=   2e+04
+Depth=  110273 States= 1.31e+08 Transitions= 2.44e+09 Memory=  4843.428        t= 7.72e+03 R=   2e+04
+Depth=  110273 States= 1.32e+08 Transitions= 2.46e+09 Memory=  4871.943        t= 7.78e+03 R=   2e+04
+Depth=  110273 States= 1.33e+08 Transitions= 2.49e+09 Memory=  4898.311        t= 7.88e+03 R=   2e+04
+Depth=  110273 States= 1.34e+08 Transitions= 2.51e+09 Memory=  4926.143        t= 7.95e+03 R=   2e+04
+Depth=  110273 States= 1.35e+08 Transitions= 2.54e+09 Memory=  4953.389        t= 8.02e+03 R=   2e+04
+pan: resizing hashtable to -w28..  done
+Depth=  110273 States= 1.36e+08 Transitions= 2.56e+09 Memory=  7001.389        t= 8.13e+03 R=   2e+04
+Depth=  110273 States= 1.37e+08 Transitions= 2.58e+09 Memory=  7001.389        t= 8.19e+03 R=   2e+04
+Depth=  110273 States= 1.38e+08 Transitions= 2.61e+09 Memory=  7018.381        t= 8.26e+03 R=   2e+04
+Depth=  110273 States= 1.39e+08 Transitions= 2.63e+09 Memory=  7045.236        t= 8.33e+03 R=   2e+04
+Depth=  110273 States=  1.4e+08 Transitions= 2.65e+09 Memory=  7073.068        t= 8.39e+03 R=   2e+04
+Depth=  110273 States= 1.41e+08 Transitions= 2.68e+09 Memory=  7100.022        t= 8.48e+03 R=   2e+04
+Depth=  110273 States= 1.42e+08 Transitions=  2.7e+09 Memory=  7128.733        t= 8.54e+03 R=   2e+04
+Depth=  110273 States= 1.43e+08 Transitions= 2.72e+09 Memory=  7155.686        t= 8.6e+03 R=   2e+04
+Depth=  110273 States= 1.44e+08 Transitions= 2.73e+09 Memory=  7181.662        t= 8.65e+03 R=   2e+04
+Depth=  110273 States= 1.45e+08 Transitions= 2.74e+09 Memory=  7209.885        t= 8.69e+03 R=   2e+04
+Depth=  110273 States= 1.46e+08 Transitions= 2.76e+09 Memory=  7239.963        t= 8.73e+03 R=   2e+04
+Depth=  110273 States= 1.47e+08 Transitions= 2.77e+09 Memory=  7269.846        t= 8.77e+03 R=   2e+04
+Depth=  110273 States= 1.48e+08 Transitions= 2.79e+09 Memory=  7299.533        t= 8.82e+03 R=   2e+04
+Depth=  110273 States= 1.49e+08 Transitions=  2.8e+09 Memory=  7327.365        t= 8.87e+03 R=   2e+04
+Depth=  110273 States=  1.5e+08 Transitions= 2.82e+09 Memory=  7356.760        t= 8.92e+03 R=   2e+04
+Depth=  110273 States= 1.51e+08 Transitions= 2.84e+09 Memory=  7384.983        t= 8.97e+03 R=   2e+04
+Depth=  110273 States= 1.52e+08 Transitions= 2.85e+09 Memory=  7412.522        t= 9.02e+03 R=   2e+04
+Depth=  110273 States= 1.53e+08 Transitions= 2.87e+09 Memory=  7440.256        t= 9.09e+03 R=   2e+04
+Depth=  110273 States= 1.54e+08 Transitions= 2.91e+09 Memory=  7471.604        t= 9.2e+03 R=   2e+04
+Depth=  110273 States= 1.55e+08 Transitions= 2.95e+09 Memory=  7506.662        t= 9.32e+03 R=   2e+04
+Depth=  110273 States= 1.56e+08 Transitions=    3e+09 Memory=  7540.744        t= 9.48e+03 R=   2e+04
+Depth=  110273 States= 1.57e+08 Transitions= 3.04e+09 Memory=  7570.041        t= 9.62e+03 R=   2e+04
+Depth=  110273 States= 1.58e+08 Transitions= 3.07e+09 Memory=  7593.772        t= 9.72e+03 R=   2e+04
+Depth=  110273 States= 1.59e+08 Transitions= 3.09e+09 Memory=  7622.580        t= 9.77e+03 R=   2e+04
+Depth=  110273 States=  1.6e+08 Transitions= 3.11e+09 Memory=  7650.315        t= 9.83e+03 R=   2e+04
+Depth=  110273 States= 1.61e+08 Transitions= 3.14e+09 Memory=  7678.733        t= 9.93e+03 R=   2e+04
+Depth=  110273 States= 1.62e+08 Transitions= 3.16e+09 Memory=  7707.639        t=  1e+04 R=   2e+04
+Depth=  110273 States= 1.63e+08 Transitions= 3.18e+09 Memory=  7735.276        t= 1.01e+04 R=   2e+04
+Depth=  110273 States= 1.64e+08 Transitions=  3.2e+09 Memory=  7761.350        t= 1.01e+04 R=   2e+04
+Depth=  110273 States= 1.65e+08 Transitions= 3.21e+09 Memory=  7787.326        t= 1.02e+04 R=   2e+04
+Depth=  110273 States= 1.66e+08 Transitions= 3.23e+09 Memory=  7817.111        t= 1.02e+04 R=   2e+04
+Depth=  110273 States= 1.67e+08 Transitions= 3.25e+09 Memory=  7844.065        t= 1.03e+04 R=   2e+04
+Depth=  110273 States= 1.68e+08 Transitions= 3.28e+09 Memory=  7871.213        t= 1.04e+04 R=   2e+04
+Depth=  110273 States= 1.69e+08 Transitions=  3.3e+09 Memory=  7898.264        t= 1.04e+04 R=   2e+04
+Depth=  110273 States=  1.7e+08 Transitions= 3.32e+09 Memory=  7925.608        t= 1.05e+04 R=   2e+04
+Depth=  110273 States= 1.71e+08 Transitions= 3.35e+09 Memory=  7952.658        t= 1.06e+04 R=   2e+04
+Depth=  110273 States= 1.72e+08 Transitions= 3.38e+09 Memory=  7981.467        t= 1.07e+04 R=   2e+04
+Depth=  110273 States= 1.73e+08 Transitions=  3.4e+09 Memory=  8009.592        t= 1.08e+04 R=   2e+04
+Depth=  110273 States= 1.74e+08 Transitions= 3.43e+09 Memory=  8036.154        t= 1.08e+04 R=   2e+04
+Depth=  110273 States= 1.75e+08 Transitions= 3.45e+09 Memory=  8063.108        t= 1.09e+04 R=   2e+04
+Depth=  110273 States= 1.76e+08 Transitions= 3.48e+09 Memory=  8091.428        t= 1.1e+04 R=   2e+04
+Depth=  110273 States= 1.77e+08 Transitions=  3.5e+09 Memory=  8119.943        t= 1.11e+04 R=   2e+04
+Depth=  110273 States= 1.78e+08 Transitions= 3.53e+09 Memory=  8148.459        t= 1.12e+04 R=   2e+04
+Depth=  110273 States= 1.79e+08 Transitions= 3.54e+09 Memory=  8176.779        t= 1.12e+04 R=   2e+04
+Depth=  110273 States=  1.8e+08 Transitions= 3.56e+09 Memory=  8202.463        t= 1.12e+04 R=   2e+04
+Depth=  110273 States= 1.81e+08 Transitions= 3.58e+09 Memory=  8232.053        t= 1.13e+04 R=   2e+04
+Depth=  110273 States= 1.82e+08 Transitions=  3.6e+09 Memory=  8260.178        t= 1.14e+04 R=   2e+04
+Depth=  110273 States= 1.83e+08 Transitions= 3.62e+09 Memory=  8288.596        t= 1.14e+04 R=   2e+04
+Depth=  110273 States= 1.84e+08 Transitions= 3.64e+09 Memory=  8317.502        t= 1.15e+04 R=   2e+04
+Depth=  110273 States= 1.85e+08 Transitions= 3.66e+09 Memory=  8342.697        t= 1.16e+04 R=   2e+04
+Depth=  110273 States= 1.86e+08 Transitions= 3.67e+09 Memory=  8371.506        t= 1.16e+04 R=   2e+04
+Depth=  110273 States= 1.87e+08 Transitions= 3.69e+09 Memory=  8400.803        t= 1.17e+04 R=   2e+04
+Depth=  110273 States= 1.88e+08 Transitions= 3.71e+09 Memory=  8428.440        t= 1.17e+04 R=   2e+04
+Depth=  110273 States= 1.89e+08 Transitions= 3.73e+09 Memory=  8458.029        t= 1.18e+04 R=   2e+04
+Depth=  110273 States=  1.9e+08 Transitions= 3.75e+09 Memory=  8484.885        t= 1.18e+04 R=   2e+04
+Depth=  110273 States= 1.91e+08 Transitions= 3.76e+09 Memory=  8512.522        t= 1.19e+04 R=   2e+04
+Depth=  110273 States= 1.92e+08 Transitions= 3.78e+09 Memory=  8540.549        t= 1.19e+04 R=   2e+04
+Depth=  110273 States= 1.93e+08 Transitions= 3.79e+09 Memory=  8567.893        t= 1.2e+04 R=   2e+04
+Depth=  110273 States= 1.94e+08 Transitions= 3.82e+09 Memory=  8595.041        t= 1.21e+04 R=   2e+04
+Depth=  110273 States= 1.95e+08 Transitions= 3.84e+09 Memory=  8626.584        t= 1.21e+04 R=   2e+04
+Depth=  110273 States= 1.96e+08 Transitions= 3.85e+09 Memory=  8655.295        t= 1.22e+04 R=   2e+04
+Depth=  110273 States= 1.97e+08 Transitions= 3.87e+09 Memory=  8683.908        t= 1.22e+04 R=   2e+04
+Depth=  110273 States= 1.98e+08 Transitions= 3.89e+09 Memory=  8711.447        t= 1.23e+04 R=   2e+04
+Depth=  110273 States= 1.99e+08 Transitions= 3.91e+09 Memory=  8738.108        t= 1.23e+04 R=   2e+04
+Depth=  110273 States=    2e+08 Transitions= 3.93e+09 Memory=  8766.818        t= 1.24e+04 R=   2e+04
+Depth=  110273 States= 2.01e+08 Transitions= 3.95e+09 Memory=  8796.701        t= 1.25e+04 R=   2e+04
+Depth=  110273 States= 2.02e+08 Transitions= 3.96e+09 Memory=  8821.897        t= 1.25e+04 R=   2e+04
+Depth=  110273 States= 2.03e+08 Transitions= 3.99e+09 Memory=  8852.072        t= 1.26e+04 R=   2e+04
+Depth=  110273 States= 2.04e+08 Transitions=    4e+09 Memory=  8880.490        t= 1.26e+04 R=   2e+04
+Depth=  110273 States= 2.05e+08 Transitions= 4.02e+09 Memory=  8907.541        t= 1.27e+04 R=   2e+04
+Depth=  110273 States= 2.06e+08 Transitions= 4.03e+09 Memory=  8935.373        t= 1.27e+04 R=   2e+04
+Depth=  110273 States= 2.07e+08 Transitions= 4.05e+09 Memory=  8965.354        t= 1.28e+04 R=   2e+04
+Depth=  110273 States= 2.08e+08 Transitions= 4.07e+09 Memory=  8993.674        t= 1.28e+04 R=   2e+04
+Depth=  110273 States= 2.09e+08 Transitions= 4.09e+09 Memory=  9022.190        t= 1.29e+04 R=   2e+04
+Depth=  110273 States=  2.1e+08 Transitions= 4.11e+09 Memory=  9052.072        t= 1.3e+04 R=   2e+04
+Depth=  110273 States= 2.11e+08 Transitions= 4.13e+09 Memory=  9077.268        t= 1.3e+04 R=   2e+04
+Depth=  110273 States= 2.12e+08 Transitions= 4.15e+09 Memory=  9107.248        t= 1.31e+04 R=   2e+04
+Depth=  110273 States= 2.13e+08 Transitions= 4.16e+09 Memory=  9135.861        t= 1.31e+04 R=   2e+04
+Depth=  110273 States= 2.14e+08 Transitions= 4.18e+09 Memory=  9162.619        t= 1.32e+04 R=   2e+04
+Depth=  110273 States= 2.15e+08 Transitions=  4.2e+09 Memory=  9190.549        t= 1.32e+04 R=   2e+04
+Depth=  110273 States= 2.16e+08 Transitions= 4.21e+09 Memory=  9220.139        t= 1.33e+04 R=   2e+04
+Depth=  110273 States= 2.17e+08 Transitions= 4.23e+09 Memory=  9248.947        t= 1.33e+04 R=   2e+04
+Depth=  110273 States= 2.18e+08 Transitions= 4.25e+09 Memory=  9273.166        t= 1.34e+04 R=   2e+04
+Depth=  110273 States= 2.19e+08 Transitions= 4.26e+09 Memory=  9301.096        t= 1.34e+04 R=   2e+04
+Depth=  110273 States=  2.2e+08 Transitions= 4.28e+09 Memory=  9329.221        t= 1.35e+04 R=   2e+04
+Depth=  110273 States= 2.21e+08 Transitions= 4.29e+09 Memory=  9356.858        t= 1.35e+04 R=   2e+04
+Depth=  110273 States= 2.22e+08 Transitions= 4.32e+09 Memory=  9384.494        t= 1.36e+04 R=   2e+04
+Depth=  110273 States= 2.23e+08 Transitions= 4.35e+09 Memory=  9414.279        t= 1.37e+04 R=   2e+04
+Depth=  110273 States= 2.24e+08 Transitions= 4.37e+09 Memory=  9445.139        t= 1.38e+04 R=   2e+04
+Depth=  110273 States= 2.25e+08 Transitions= 4.39e+09 Memory=  9473.361        t= 1.38e+04 R=   2e+04
+Depth=  110273 States= 2.26e+08 Transitions=  4.4e+09 Memory=  9502.365        t= 1.39e+04 R=   2e+04
+Depth=  110273 States= 2.27e+08 Transitions= 4.42e+09 Memory=  9530.783        t= 1.39e+04 R=   2e+04
+Depth=  110273 States= 2.28e+08 Transitions= 4.44e+09 Memory=  9559.592        t= 1.4e+04 R=   2e+04
+Depth=  110273 States= 2.29e+08 Transitions= 4.47e+09 Memory=  9586.838        t= 1.41e+04 R=   2e+04
+Depth=  110273 States=  2.3e+08 Transitions= 4.49e+09 Memory=  9612.815        t= 1.42e+04 R=   2e+04
+Depth=  110273 States= 2.31e+08 Transitions= 4.52e+09 Memory=  9639.572        t= 1.42e+04 R=   2e+04
+Depth=  110273 States= 2.32e+08 Transitions= 4.54e+09 Memory=  9666.233        t= 1.43e+04 R=   2e+04
+Depth=  110273 States= 2.33e+08 Transitions= 4.57e+09 Memory=  9692.307        t= 1.44e+04 R=   2e+04
+Depth=  110273 States= 2.34e+08 Transitions= 4.59e+09 Memory=  9718.186        t= 1.45e+04 R=   2e+04
+Depth=  110273 States= 2.35e+08 Transitions= 4.61e+09 Memory=  9746.604        t= 1.45e+04 R=   2e+04
+Depth=  110273 States= 2.36e+08 Transitions= 4.63e+09 Memory=  9772.678        t= 1.46e+04 R=   2e+04
+Depth=  110273 States= 2.37e+08 Transitions= 4.65e+09 Memory=  9799.826        t= 1.46e+04 R=   2e+04
+Depth=  110273 States= 2.38e+08 Transitions= 4.67e+09 Memory=  9826.779        t= 1.47e+04 R=   2e+04
+Depth=  110273 States= 2.39e+08 Transitions=  4.7e+09 Memory=  9853.342        t= 1.48e+04 R=   2e+04
+Depth=  110273 States=  2.4e+08 Transitions= 4.72e+09 Memory=  9879.709        t= 1.49e+04 R=   2e+04
+Depth=  110273 States= 2.41e+08 Transitions= 4.75e+09 Memory=  9906.174        t= 1.49e+04 R=   2e+04
+Depth=  110273 States= 2.42e+08 Transitions= 4.77e+09 Memory=  9934.006        t= 1.5e+04 R=   2e+04
+Depth=  110273 States= 2.43e+08 Transitions= 4.79e+09 Memory=  9960.471        t= 1.51e+04 R=   2e+04
+Depth=  110273 States= 2.44e+08 Transitions= 4.81e+09 Memory=  9987.229        t= 1.51e+04 R=   2e+04
+Depth=  110273 States= 2.45e+08 Transitions= 4.83e+09 Memory= 10014.084        t= 1.52e+04 R=   2e+04
+Depth=  110273 States= 2.46e+08 Transitions= 4.85e+09 Memory= 10040.842        t= 1.53e+04 R=   2e+04
+Depth=  110273 States= 2.47e+08 Transitions= 4.87e+09 Memory= 10067.404        t= 1.53e+04 R=   2e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 110273, errors: 0
+1.5183145e+08 states, stored (2.47605e+08 visited)
+4.6348493e+09 states, matched
+4.8824543e+09 transitions (= visited+matched)
+7.5925477e+10 atomic steps
+hash conflicts: 9.7802755e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+16796.540      equivalent memory usage for states (stored*(State-vector + overhead))
+ 7578.458      actual memory usage for states (compression: 45.12%)
+               state-vector as stored = 16 byte + 36 byte overhead
+ 2048.000      memory used for hash table (-w28)
+  457.764      memory used for DFS stack (-m10000000)
+    1.290      memory lost to fragmentation
+10082.932      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 547261 4207 4043 2 2 ]
+unreached in proctype urcu_reader
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 407, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 222, "(1)"
+       line 253, "pan.___", state 242, "(1)"
+       line 257, "pan.___", state 250, "(1)"
+       line 687, "pan.___", state 269, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 276, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 308, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 322, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 340, "(1)"
+       line 253, "pan.___", state 360, "(1)"
+       line 257, "pan.___", state 368, "(1)"
+       line 407, "pan.___", state 387, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 419, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 433, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 451, "(1)"
+       line 253, "pan.___", state 471, "(1)"
+       line 257, "pan.___", state 479, "(1)"
+       line 407, "pan.___", state 500, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 502, "(1)"
+       line 407, "pan.___", state 503, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 503, "else"
+       line 407, "pan.___", state 506, "(1)"
+       line 411, "pan.___", state 514, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 516, "(1)"
+       line 411, "pan.___", state 517, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 517, "else"
+       line 411, "pan.___", state 520, "(1)"
+       line 411, "pan.___", state 521, "(1)"
+       line 411, "pan.___", state 521, "(1)"
+       line 409, "pan.___", state 526, "((i<1))"
+       line 409, "pan.___", state 526, "((i>=1))"
+       line 416, "pan.___", state 532, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 534, "(1)"
+       line 416, "pan.___", state 535, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 535, "else"
+       line 416, "pan.___", state 538, "(1)"
+       line 416, "pan.___", state 539, "(1)"
+       line 416, "pan.___", state 539, "(1)"
+       line 420, "pan.___", state 546, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 548, "(1)"
+       line 420, "pan.___", state 549, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 549, "else"
+       line 420, "pan.___", state 552, "(1)"
+       line 420, "pan.___", state 553, "(1)"
+       line 420, "pan.___", state 553, "(1)"
+       line 418, "pan.___", state 558, "((i<2))"
+       line 418, "pan.___", state 558, "((i>=2))"
+       line 245, "pan.___", state 564, "(1)"
+       line 249, "pan.___", state 572, "(1)"
+       line 249, "pan.___", state 573, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 573, "else"
+       line 247, "pan.___", state 578, "((i<1))"
+       line 247, "pan.___", state 578, "((i>=1))"
+       line 253, "pan.___", state 584, "(1)"
+       line 253, "pan.___", state 585, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 585, "else"
+       line 257, "pan.___", state 592, "(1)"
+       line 257, "pan.___", state 593, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 593, "else"
+       line 255, "pan.___", state 598, "((i<2))"
+       line 255, "pan.___", state 598, "((i>=2))"
+       line 262, "pan.___", state 602, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 602, "else"
+       line 427, "pan.___", state 604, "(1)"
+       line 427, "pan.___", state 604, "(1)"
+       line 687, "pan.___", state 607, "cached_urcu_active_readers = (tmp+1)"
+       line 687, "pan.___", state 608, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 687, "pan.___", state 609, "(1)"
+       line 407, "pan.___", state 616, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 648, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 662, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 680, "(1)"
+       line 253, "pan.___", state 700, "(1)"
+       line 257, "pan.___", state 708, "(1)"
+       line 407, "pan.___", state 734, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 766, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 780, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 798, "(1)"
+       line 253, "pan.___", state 818, "(1)"
+       line 257, "pan.___", state 826, "(1)"
+       line 407, "pan.___", state 845, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 847, "(1)"
+       line 407, "pan.___", state 848, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 848, "else"
+       line 407, "pan.___", state 851, "(1)"
+       line 411, "pan.___", state 859, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 861, "(1)"
+       line 411, "pan.___", state 862, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 862, "else"
+       line 411, "pan.___", state 865, "(1)"
+       line 411, "pan.___", state 866, "(1)"
+       line 411, "pan.___", state 866, "(1)"
+       line 409, "pan.___", state 871, "((i<1))"
+       line 409, "pan.___", state 871, "((i>=1))"
+       line 416, "pan.___", state 877, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 879, "(1)"
+       line 416, "pan.___", state 880, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 880, "else"
+       line 416, "pan.___", state 883, "(1)"
+       line 416, "pan.___", state 884, "(1)"
+       line 416, "pan.___", state 884, "(1)"
+       line 420, "pan.___", state 891, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 893, "(1)"
+       line 420, "pan.___", state 894, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 894, "else"
+       line 420, "pan.___", state 897, "(1)"
+       line 420, "pan.___", state 898, "(1)"
+       line 420, "pan.___", state 898, "(1)"
+       line 418, "pan.___", state 903, "((i<2))"
+       line 418, "pan.___", state 903, "((i>=2))"
+       line 245, "pan.___", state 909, "(1)"
+       line 249, "pan.___", state 917, "(1)"
+       line 249, "pan.___", state 918, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 918, "else"
+       line 247, "pan.___", state 923, "((i<1))"
+       line 247, "pan.___", state 923, "((i>=1))"
+       line 253, "pan.___", state 929, "(1)"
+       line 253, "pan.___", state 930, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 930, "else"
+       line 257, "pan.___", state 937, "(1)"
+       line 257, "pan.___", state 938, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 938, "else"
+       line 255, "pan.___", state 943, "((i<2))"
+       line 255, "pan.___", state 943, "((i>=2))"
+       line 262, "pan.___", state 947, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 947, "else"
+       line 427, "pan.___", state 949, "(1)"
+       line 427, "pan.___", state 949, "(1)"
+       line 695, "pan.___", state 953, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 958, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 990, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1004, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1022, "(1)"
+       line 253, "pan.___", state 1042, "(1)"
+       line 257, "pan.___", state 1050, "(1)"
+       line 407, "pan.___", state 1072, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1104, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1118, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1136, "(1)"
+       line 253, "pan.___", state 1156, "(1)"
+       line 257, "pan.___", state 1164, "(1)"
+       line 407, "pan.___", state 1187, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1219, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1233, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1251, "(1)"
+       line 253, "pan.___", state 1271, "(1)"
+       line 257, "pan.___", state 1279, "(1)"
+       line 407, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1330, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1344, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1362, "(1)"
+       line 253, "pan.___", state 1382, "(1)"
+       line 257, "pan.___", state 1390, "(1)"
+       line 407, "pan.___", state 1414, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1446, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1460, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1478, "(1)"
+       line 253, "pan.___", state 1498, "(1)"
+       line 257, "pan.___", state 1506, "(1)"
+       line 407, "pan.___", state 1525, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1557, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1571, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1589, "(1)"
+       line 253, "pan.___", state 1609, "(1)"
+       line 257, "pan.___", state 1617, "(1)"
+       line 407, "pan.___", state 1639, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1671, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1685, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1703, "(1)"
+       line 253, "pan.___", state 1723, "(1)"
+       line 257, "pan.___", state 1731, "(1)"
+       line 734, "pan.___", state 1750, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 1757, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1789, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1803, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1821, "(1)"
+       line 253, "pan.___", state 1841, "(1)"
+       line 257, "pan.___", state 1849, "(1)"
+       line 407, "pan.___", state 1868, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1900, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1914, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1932, "(1)"
+       line 253, "pan.___", state 1952, "(1)"
+       line 257, "pan.___", state 1960, "(1)"
+       line 407, "pan.___", state 1981, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 1983, "(1)"
+       line 407, "pan.___", state 1984, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 1984, "else"
+       line 407, "pan.___", state 1987, "(1)"
+       line 411, "pan.___", state 1995, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 1997, "(1)"
+       line 411, "pan.___", state 1998, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 1998, "else"
+       line 411, "pan.___", state 2001, "(1)"
+       line 411, "pan.___", state 2002, "(1)"
+       line 411, "pan.___", state 2002, "(1)"
+       line 409, "pan.___", state 2007, "((i<1))"
+       line 409, "pan.___", state 2007, "((i>=1))"
+       line 416, "pan.___", state 2013, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2015, "(1)"
+       line 416, "pan.___", state 2016, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2016, "else"
+       line 416, "pan.___", state 2019, "(1)"
+       line 416, "pan.___", state 2020, "(1)"
+       line 416, "pan.___", state 2020, "(1)"
+       line 420, "pan.___", state 2027, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2029, "(1)"
+       line 420, "pan.___", state 2030, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2030, "else"
+       line 420, "pan.___", state 2033, "(1)"
+       line 420, "pan.___", state 2034, "(1)"
+       line 420, "pan.___", state 2034, "(1)"
+       line 418, "pan.___", state 2039, "((i<2))"
+       line 418, "pan.___", state 2039, "((i>=2))"
+       line 245, "pan.___", state 2045, "(1)"
+       line 249, "pan.___", state 2053, "(1)"
+       line 249, "pan.___", state 2054, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 2054, "else"
+       line 247, "pan.___", state 2059, "((i<1))"
+       line 247, "pan.___", state 2059, "((i>=1))"
+       line 253, "pan.___", state 2065, "(1)"
+       line 253, "pan.___", state 2066, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 2066, "else"
+       line 257, "pan.___", state 2073, "(1)"
+       line 257, "pan.___", state 2074, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 2074, "else"
+       line 255, "pan.___", state 2079, "((i<2))"
+       line 255, "pan.___", state 2079, "((i>=2))"
+       line 262, "pan.___", state 2083, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 2083, "else"
+       line 427, "pan.___", state 2085, "(1)"
+       line 427, "pan.___", state 2085, "(1)"
+       line 734, "pan.___", state 2088, "cached_urcu_active_readers = (tmp+1)"
+       line 734, "pan.___", state 2089, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 734, "pan.___", state 2090, "(1)"
+       line 407, "pan.___", state 2097, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2129, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2143, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 2161, "(1)"
+       line 253, "pan.___", state 2181, "(1)"
+       line 257, "pan.___", state 2189, "(1)"
+       line 407, "pan.___", state 2214, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2246, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2260, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 2278, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2306, "(1)"
+       line 407, "pan.___", state 2325, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2357, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2371, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 2389, "(1)"
+       line 253, "pan.___", state 2409, "(1)"
+       line 257, "pan.___", state 2417, "(1)"
+       line 245, "pan.___", state 2448, "(1)"
+       line 253, "pan.___", state 2468, "(1)"
+       line 257, "pan.___", state 2476, "(1)"
+       line 245, "pan.___", state 2491, "(1)"
+       line 253, "pan.___", state 2511, "(1)"
+       line 257, "pan.___", state 2519, "(1)"
+       line 929, "pan.___", state 2536, "-end-"
+       (221 of 2536 states)
+unreached in proctype urcu_writer
+       line 407, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 245, "pan.___", state 109, "(1)"
+       line 249, "pan.___", state 117, "(1)"
+       line 253, "pan.___", state 129, "(1)"
+       line 268, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 167, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 180, "cache_dirty_rcu_ptr = 0"
+       line 407, "pan.___", state 220, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 234, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 252, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 266, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 284, "(1)"
+       line 249, "pan.___", state 292, "(1)"
+       line 253, "pan.___", state 304, "(1)"
+       line 257, "pan.___", state 312, "(1)"
+       line 411, "pan.___", state 347, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 365, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 379, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 405, "(1)"
+       line 253, "pan.___", state 417, "(1)"
+       line 257, "pan.___", state 425, "(1)"
+       line 411, "pan.___", state 468, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 486, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 500, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 526, "(1)"
+       line 253, "pan.___", state 538, "(1)"
+       line 257, "pan.___", state 546, "(1)"
+       line 411, "pan.___", state 579, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 597, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 611, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 637, "(1)"
+       line 253, "pan.___", state 649, "(1)"
+       line 257, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 692, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 710, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 724, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 750, "(1)"
+       line 253, "pan.___", state 762, "(1)"
+       line 257, "pan.___", state 770, "(1)"
+       line 268, "pan.___", state 818, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 827, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 840, "cache_dirty_rcu_ptr = 0"
+       line 245, "pan.___", state 865, "(1)"
+       line 249, "pan.___", state 873, "(1)"
+       line 253, "pan.___", state 885, "(1)"
+       line 257, "pan.___", state 893, "(1)"
+       line 268, "pan.___", state 924, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 933, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 946, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 955, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 971, "(1)"
+       line 249, "pan.___", state 979, "(1)"
+       line 253, "pan.___", state 991, "(1)"
+       line 257, "pan.___", state 999, "(1)"
+       line 268, "pan.___", state 1020, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1029, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1044, "(1)"
+       line 280, "pan.___", state 1051, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1067, "(1)"
+       line 249, "pan.___", state 1075, "(1)"
+       line 253, "pan.___", state 1087, "(1)"
+       line 257, "pan.___", state 1095, "(1)"
+       line 268, "pan.___", state 1126, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1135, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1148, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1157, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1173, "(1)"
+       line 249, "pan.___", state 1181, "(1)"
+       line 253, "pan.___", state 1193, "(1)"
+       line 257, "pan.___", state 1201, "(1)"
+       line 272, "pan.___", state 1227, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1240, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1249, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1265, "(1)"
+       line 249, "pan.___", state 1273, "(1)"
+       line 253, "pan.___", state 1285, "(1)"
+       line 257, "pan.___", state 1293, "(1)"
+       line 268, "pan.___", state 1324, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1333, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1346, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1355, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1371, "(1)"
+       line 249, "pan.___", state 1379, "(1)"
+       line 253, "pan.___", state 1391, "(1)"
+       line 257, "pan.___", state 1399, "(1)"
+       line 272, "pan.___", state 1425, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1438, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1447, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1463, "(1)"
+       line 249, "pan.___", state 1471, "(1)"
+       line 253, "pan.___", state 1483, "(1)"
+       line 257, "pan.___", state 1491, "(1)"
+       line 268, "pan.___", state 1522, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1531, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1544, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1553, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1569, "(1)"
+       line 249, "pan.___", state 1577, "(1)"
+       line 253, "pan.___", state 1589, "(1)"
+       line 257, "pan.___", state 1597, "(1)"
+       line 272, "pan.___", state 1623, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1636, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1645, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1661, "(1)"
+       line 249, "pan.___", state 1669, "(1)"
+       line 253, "pan.___", state 1681, "(1)"
+       line 257, "pan.___", state 1689, "(1)"
+       line 268, "pan.___", state 1720, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1729, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1742, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1751, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1767, "(1)"
+       line 249, "pan.___", state 1775, "(1)"
+       line 253, "pan.___", state 1787, "(1)"
+       line 257, "pan.___", state 1795, "(1)"
+       line 1304, "pan.___", state 1811, "-end-"
+       (118 of 1811 states)
+unreached in proctype :init:
+       (0 of 28 states)
+unreached in proctype :never:
+       line 1369, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 1.54e+04 seconds
+pan: rate  16109.46 states/second
+pan: avg transition delay 3.148e-06 usec
+cp .input.spin urcu_progress_reader.spin.input
+cp .input.spin.trail urcu_progress_reader.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.spin.input b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_reader.spin.input
new file mode 100644 (file)
index 0000000..83c0751
--- /dev/null
@@ -0,0 +1,1340 @@
+#define READER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
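+/*
+ * For reference: on the outermost read lock, a reader copies urcu_gp_ctr into
+ * its urcu_active_readers entry; nested locks simply increment it. The low
+ * bits (RCU_GP_CTR_NEST_MASK) therefore count read-side nesting, while
+ * RCU_GP_CTR_BIT carries the grace-period phase toggled by the writer.
+ */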
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
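+/*
+ * Minimal illustration of the token scheme (hypothetical tokens TOK_A and
+ * TOK_B, not used by the model below). An option becomes executable once all
+ * of its input tokens are present and its own output token is not yet set;
+ * it marks itself done by producing that output token. TOK_A has no input
+ * dependency, TOK_B consumes TOK_A (a RAW dependency), and the last option
+ * only fires once both have executed:
+ *
+ *	int flow = 0;
+ *	do
+ *	:: CONSUME_TOKENS(flow, 0, TOK_A) ->
+ *		PRODUCE_TOKENS(flow, TOK_A);
+ *	:: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->
+ *		PRODUCE_TOKENS(flow, TOK_B);
+ *	:: CONSUME_TOKENS(flow, TOK_A | TOK_B, 0) ->
+ *		CLEAR_TOKENS(flow, TOK_A | TOK_B);
+ *		break
+ *	od;
+ */
+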
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it remains when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Another classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
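+/*
+ * Concrete examples of the data dependency types above (illustrative C-like
+ * statement pairs, not model code):
+ *
+ *	RAW:	x = 1;  y = x;		y must observe the new value of x
+ *	WAR:	y = x;  x = 2;		the write may not overtake the earlier read
+ *	WAW:	x = 1;  x = 2;		the final value depends on write order
+ */
+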
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May write a dirty cache entry back to memory (making it visible to other
+ * processes' caches), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
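+/*
+ * Illustrative lifecycle of a cached variable (hypothetical variable "foo",
+ * not part of this model): memory keeps one image (mem_foo) while each
+ * process keeps a local copy (cached_foo) plus a dirty bit that decides
+ * whether a flush writes back to memory.
+ *
+ *	DECLARE_CACHED_VAR(byte, foo);		global memory image mem_foo
+ *	DECLARE_PROC_CACHED_VAR(byte, foo);	per-process cached_foo + dirty bit
+ *	INIT_CACHED_VAR(foo, 0);
+ *	INIT_PROC_CACHED_VAR(foo, 0);
+ *	WRITE_CACHED_VAR(foo, 1);		updates cached_foo, marks it dirty
+ *	CACHE_WRITE_TO_MEM(foo, get_pid());	flushes to mem_foo only if dirty
+ *	CACHE_READ_FROM_MEM(foo, get_pid());	refreshes cached_foo only if clean
+ */
+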
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies on the read side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
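+/*
+ * Summary of the REMOTE_BARRIERS handshake modeled above: the writer's
+ * smp_mb_send() executes its own smp_mb(), raises reader_barrier[i] for each
+ * reader and busy-waits until the flag is cleared; the reader's smp_mb_recv()
+ * may either service the request (execute smp_mb() and clear the flag) or
+ * ignore it entirely. The writer-side busy-wait carries a progress label
+ * (PROGRESS_LABEL) so it is not reported as a non-progress cycle.
+ */
+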
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
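+/*
+ * Worked example of the bit layout above: with READ_LOCK_BASE = 1, the four
+ * internal PROCEDURE_READ_LOCK tokens (READ_PROD_A_READ through
+ * READ_PROD_C_IF_TRUE_READ) occupy bits 1 to 4 of _proc_urcu_reader, and
+ * READ_LOCK_OUT (1 << 5) is that instance's output token.
+ * READ_PROC_ALL_TOKENS_CLEAR covers bits 0 to 29, so clearing it resets the
+ * whole scoreboard, branch tokens included.
+ */
+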
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s ARE IN PLACE_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures that
+        * execution of one loop does not spill into the next one.
+        * However, when the mb()s are removed (execution relying on the signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one
+        * loop's execution from spilling into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
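
For orientation, the READ_* token steps above correspond, roughly, to the reader-side sequence of the C implementation being modelled. The sketch below is an editorial illustration, not part of the patch: struct mydata, rcu_ptr and reader_one_pass() are assumed names, the memory barriers tracked by READ_PROC_FIRST_MB through READ_PROC_FOURTH_MB are taken to live inside rcu_read_lock()/rcu_read_unlock() (as in the memory-barrier flavour of liburcu), and the model's rcu_data[] slab indexing stands in for the pointer dereference.

    #include <urcu.h>               /* liburcu; flavour selection glossed over */

    struct mydata { int payload; }; /* stands in for the model's rcu_data[] slab */
    static struct mydata *rcu_ptr;  /* the pointer the model calls rcu_ptr */

    /* One pass of urcu_one_read(): two consecutive read-side critical
     * sections, the first one containing a nested lock/unlock pair.
     * The calling thread is assumed to have called rcu_register_thread(). */
    static void reader_one_pass(void)
    {
            struct mydata *p;
            int v;

            rcu_read_lock();                /* READ_LOCK_OUT               */
            rcu_read_lock();                /* READ_LOCK_NESTED_OUT        */
            p = rcu_dereference(rcu_ptr);   /* READ_PROC_READ_GEN          */
            v = p->payload;                 /* READ_PROC_ACCESS_GEN        */
            rcu_read_unlock();              /* READ_UNLOCK_NESTED_OUT      */
            rcu_read_unlock();              /* READ_UNLOCK_OUT             */

            /* Unrolled second critical section (the *_UNROLL tokens). */
            rcu_read_lock();                /* READ_LOCK_OUT_UNROLL        */
            p = rcu_dereference(rcu_ptr);   /* READ_PROC_READ_GEN_UNROLL   */
            v = p->payload;                 /* READ_PROC_ACCESS_GEN_UNROLL */
            rcu_read_unlock();              /* READ_UNLOCK_OUT_UNROLL      */
            (void) v;
    }
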
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. Reader and writer progress have to be tested
+                * separately; otherwise we could believe the writer is making
+                * progress while it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
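
For readers of this hunk in isolation: the CONSUME_TOKENS()/PRODUCE_TOKENS()/CLEAR_TOKENS() macros are defined earlier in the model and are not shown in this excerpt. The C rendering below is only a plausible reading inferred from the call sites above (a per-instruction bit, a set of dependency bits that must already be produced, and a set of bits that must not yet be produced); it is not copied from the actual definitions.

    /* Hypothetical rendering of the token scheme; the real macros live
     * earlier in urcu.spin. */
    #define CONSUME_TOKENS(state, dependencies, not_produced_yet)      \
            ((((state) & (dependencies)) == (dependencies)) &&         \
             !((state) & (not_produced_yet)))
    #define PRODUCE_TOKENS(state, bits)     ((state) |= (bits))
    #define CLEAR_TOKENS(state, bits)       ((state) &= ~(bits))
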
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we do not add non-existent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second
+                        * read, which was performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry that was read is poisoned, it
+                * is fine not to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
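
Similarly, the WRITE_* token steps of urcu_writer() map, roughly, onto the usual liburcu update pattern sketched below. This is again an editorial, assumption-laden illustration rather than part of the patch: it reuses the struct mydata / rcu_ptr declarations from the reader sketch above, writer_lock stands in for the "mutexes are implied around writer execution" note, and the two grace-period counter flips with their per-reader waits (the WRITE_PROC_FIRST_* and WRITE_PROC_SECOND_* tokens) are assumed to happen inside synchronize_rcu().

    #include <pthread.h>
    #include <stdlib.h>
    #include <urcu.h>

    /* Serialises writers, as assumed by the model. */
    static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

    static void writer_update(struct mydata *newp)
    {
            struct mydata *oldp;

            pthread_mutex_lock(&writer_lock);
            newp->payload = 42;                      /* WRITE_DATA */
            /* rcu_xchg_pointer() covers the write barrier plus the pointer
             * exchange: WRITE_PROC_WMB + WRITE_XCHG_PTR. */
            oldp = rcu_xchg_pointer(&rcu_ptr, newp);
            /* synchronize_rcu(): the two urcu_gp_ctr parity flips and the
             * waits on urcu_active_readers, i.e. WRITE_PROC_FIRST_MB through
             * WRITE_PROC_SECOND_MB in the model. */
            synchronize_rcu();
            free(oldp);                              /* WRITE_FREE (the model poisons the slot) */
            pthread_mutex_unlock(&writer_lock);
    }
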
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.log b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..a04140c
--- /dev/null
@@ -0,0 +1,681 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1362)
+depth 7: Claim reached state 9 (line 1367)
+depth 50: Claim reached state 9 (line 1366)
+Depth=    7070 States=    1e+06 Transitions= 1.16e+07 Memory=   493.010        t=   34.5 R=   3e+04
+Depth=    7070 States=    2e+06 Transitions= 2.31e+07 Memory=   521.330        t=   69.3 R=   3e+04
+Depth=    7070 States=    3e+06 Transitions= 3.43e+07 Memory=   549.455        t=    103 R=   3e+04
+pan: resizing hashtable to -w22..  done
+Depth=    8814 States=    4e+06 Transitions= 4.57e+07 Memory=   607.041        t=    138 R=   3e+04
+Depth=    8814 States=    5e+06 Transitions= 5.73e+07 Memory=   632.920        t=    174 R=   3e+04
+Depth=    8814 States=    6e+06 Transitions= 7.04e+07 Memory=   662.217        t=    215 R=   3e+04
+Depth=    8814 States=    7e+06 Transitions= 8.38e+07 Memory=   690.049        t=    257 R=   3e+04
+Depth=    8814 States=    8e+06 Transitions= 9.59e+07 Memory=   717.588        t=    294 R=   3e+04
+Depth=    8814 States=    9e+06 Transitions= 1.08e+08 Memory=   745.127        t=    332 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    8814 States=    1e+07 Transitions= 1.29e+08 Memory=   897.639        t=    398 R=   3e+04
+Depth=    8814 States=  1.1e+07 Transitions= 1.41e+08 Memory=   926.154        t=    434 R=   3e+04
+Depth=    8814 States=  1.2e+07 Transitions= 1.53e+08 Memory=   953.401        t=    471 R=   3e+04
+Depth=    8814 States=  1.3e+07 Transitions= 1.64e+08 Memory=   980.940        t=    505 R=   3e+04
+Depth=    8814 States=  1.4e+07 Transitions= 1.75e+08 Memory=  1010.725        t=    539 R=   3e+04
+Depth=    8814 States=  1.5e+07 Transitions= 1.87e+08 Memory=  1037.580        t=    573 R=   3e+04
+Depth=    8814 States=  1.6e+07 Transitions= 1.98e+08 Memory=  1064.924        t=    607 R=   3e+04
+Depth=    8814 States=  1.7e+07 Transitions= 2.09e+08 Memory=  1094.904        t=    641 R=   3e+04
+Depth=    8814 States=  1.8e+07 Transitions= 2.23e+08 Memory=  1119.514        t=    683 R=   3e+04
+Depth=    8814 States=  1.9e+07 Transitions= 2.36e+08 Memory=  1146.662        t=    725 R=   3e+04
+Depth=    8814 States=    2e+07 Transitions= 2.57e+08 Memory=  1174.201        t=    790 R=   3e+04
+Depth=    8814 States=  2.1e+07 Transitions= 2.93e+08 Memory=  1205.549        t=    908 R=   2e+04
+Depth=    9015 States=  2.2e+07 Transitions= 3.11e+08 Memory=  1235.822        t=    965 R=   2e+04
+Depth=    9015 States=  2.3e+07 Transitions= 3.25e+08 Memory=  1264.533        t= 1.01e+03 R=   2e+04
+Depth=    9015 States=  2.4e+07 Transitions= 3.45e+08 Memory=  1293.244        t= 1.07e+03 R=   2e+04
+Depth=    9015 States=  2.5e+07 Transitions= 3.67e+08 Memory=  1321.467        t= 1.14e+03 R=   2e+04
+Depth=    9015 States=  2.6e+07 Transitions= 3.84e+08 Memory=  1350.764        t= 1.2e+03 R=   2e+04
+Depth=    9015 States=  2.7e+07 Transitions= 4.07e+08 Memory=  1380.158        t= 1.27e+03 R=   2e+04
+Depth=    9015 States=  2.8e+07 Transitions= 4.29e+08 Memory=  1405.940        t= 1.34e+03 R=   2e+04
+Depth=    9015 States=  2.9e+07 Transitions= 4.48e+08 Memory=  1433.381        t= 1.4e+03 R=   2e+04
+Depth=    9015 States=    3e+07 Transitions= 4.66e+08 Memory=  1461.506        t= 1.46e+03 R=   2e+04
+Depth=    9015 States=  3.1e+07 Transitions= 4.86e+08 Memory=  1489.143        t= 1.52e+03 R=   2e+04
+Depth=    9015 States=  3.2e+07 Transitions=  5.1e+08 Memory=  1516.486        t= 1.6e+03 R=   2e+04
+Depth=    9015 States=  3.3e+07 Transitions= 5.29e+08 Memory=  1543.733        t= 1.66e+03 R=   2e+04
+Depth=    9015 States=  3.4e+07 Transitions= 5.47e+08 Memory=  1571.760        t= 1.72e+03 R=   2e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9015 States=  3.5e+07 Transitions= 5.69e+08 Memory=  2095.088        t= 1.79e+03 R=   2e+04
+Depth=    9015 States=  3.6e+07 Transitions= 5.95e+08 Memory=  2122.041        t= 1.87e+03 R=   2e+04
+Depth=    9015 States=  3.7e+07 Transitions= 6.12e+08 Memory=  2149.971        t= 1.93e+03 R=   2e+04
+Depth=    9015 States=  3.8e+07 Transitions= 6.29e+08 Memory=  2176.045        t= 1.98e+03 R=   2e+04
+Depth=    9015 States=  3.9e+07 Transitions= 6.42e+08 Memory=  2204.268        t= 2.02e+03 R=   2e+04
+Depth=    9015 States=    4e+07 Transitions= 6.56e+08 Memory=  2233.565        t= 2.06e+03 R=   2e+04
+Depth=    9015 States=  4.1e+07 Transitions= 6.69e+08 Memory=  2264.424        t= 2.1e+03 R=   2e+04
+Depth=    9015 States=  4.2e+07 Transitions= 6.82e+08 Memory=  2292.940        t= 2.14e+03 R=   2e+04
+Depth=    9015 States=  4.3e+07 Transitions= 6.95e+08 Memory=  2322.529        t= 2.18e+03 R=   2e+04
+Depth=    9015 States=  4.4e+07 Transitions= 7.07e+08 Memory=  2350.654        t= 2.22e+03 R=   2e+04
+Depth=    9015 States=  4.5e+07 Transitions= 7.28e+08 Memory=  2378.193        t= 2.29e+03 R=   2e+04
+Depth=    9015 States=  4.6e+07 Transitions= 7.63e+08 Memory=  2409.443        t= 2.4e+03 R=   2e+04
+Depth=    9015 States=  4.7e+07 Transitions= 7.99e+08 Memory=  2445.088        t= 2.52e+03 R=   2e+04
+Depth=    9015 States=  4.8e+07 Transitions= 8.49e+08 Memory=  2479.072        t= 2.69e+03 R=   2e+04
+Depth=    9015 States=  4.9e+07 Transitions= 8.91e+08 Memory=  2508.076        t= 2.82e+03 R=   2e+04
+Depth=    9015 States=    5e+07 Transitions= 9.24e+08 Memory=  2532.295        t= 2.93e+03 R=   2e+04
+Depth=    9015 States=  5.1e+07 Transitions= 9.39e+08 Memory=  2561.494        t= 2.98e+03 R=   2e+04
+Depth=    9015 States=  5.2e+07 Transitions=  9.6e+08 Memory=  2589.522        t= 3.05e+03 R=   2e+04
+Depth=    9015 States=  5.3e+07 Transitions= 9.99e+08 Memory=  2617.158        t= 3.17e+03 R=   2e+04
+Depth=    9015 States=  5.4e+07 Transitions= 1.01e+09 Memory=  2647.725        t= 3.22e+03 R=   2e+04
+Depth=    9015 States=  5.5e+07 Transitions= 1.03e+09 Memory=  2674.580        t= 3.27e+03 R=   2e+04
+Depth=    9015 States=  5.6e+07 Transitions= 1.05e+09 Memory=  2701.143        t= 3.32e+03 R=   2e+04
+Depth=    9015 States=  5.7e+07 Transitions= 1.06e+09 Memory=  2730.928        t= 3.36e+03 R=   2e+04
+Depth=    9015 States=  5.8e+07 Transitions= 1.09e+09 Memory=  2759.053        t= 3.45e+03 R=   2e+04
+Depth=    9015 States=  5.9e+07 Transitions=  1.1e+09 Memory=  2786.983        t= 3.5e+03 R=   2e+04
+Depth=    9015 States=    6e+07 Transitions= 1.13e+09 Memory=  2814.619        t= 3.6e+03 R=   2e+04
+Depth=    9015 States=  6.1e+07 Transitions= 1.15e+09 Memory=  2841.670        t= 3.66e+03 R=   2e+04
+Depth=    9015 States=  6.2e+07 Transitions= 1.19e+09 Memory=  2870.576        t= 3.77e+03 R=   2e+04
+Depth=    9015 States=  6.3e+07 Transitions= 1.21e+09 Memory=  2897.627        t= 3.84e+03 R=   2e+04
+Depth=    9015 States=  6.4e+07 Transitions= 1.24e+09 Memory=  2925.166        t= 3.94e+03 R=   2e+04
+Depth=    9015 States=  6.5e+07 Transitions= 1.26e+09 Memory=  2952.803        t= 4.01e+03 R=   2e+04
+Depth=    9015 States=  6.6e+07 Transitions= 1.28e+09 Memory=  2979.756        t= 4.09e+03 R=   2e+04
+Depth=    9015 States=  6.7e+07 Transitions=  1.3e+09 Memory=  3007.686        t= 4.15e+03 R=   2e+04
+Depth=    9015 States=  6.8e+07 Transitions= 1.33e+09 Memory=  3035.811        t= 4.23e+03 R=   2e+04
+Depth=    9015 States=  6.9e+07 Transitions= 1.36e+09 Memory=  3063.838        t= 4.32e+03 R=   2e+04
+Depth=    9522 States=    7e+07 Transitions= 1.38e+09 Memory=  3094.795        t= 4.39e+03 R=   2e+04
+Depth=    9522 States=  7.1e+07 Transitions= 1.39e+09 Memory=  3120.186        t= 4.43e+03 R=   2e+04
+Depth=    9522 States=  7.2e+07 Transitions= 1.41e+09 Memory=  3149.971        t= 4.5e+03 R=   2e+04
+Depth=    9553 States=  7.3e+07 Transitions= 1.44e+09 Memory=  3179.365        t= 4.57e+03 R=   2e+04
+Depth=    9553 States=  7.4e+07 Transitions= 1.45e+09 Memory=  3205.244        t= 4.61e+03 R=   2e+04
+Depth=    9553 States=  7.5e+07 Transitions= 1.47e+09 Memory=  3235.225        t= 4.69e+03 R=   2e+04
+Depth=    9553 States=  7.6e+07 Transitions= 1.49e+09 Memory=  3263.545        t= 4.75e+03 R=   2e+04
+Depth=    9553 States=  7.7e+07 Transitions= 1.51e+09 Memory=  3290.401        t= 4.8e+03 R=   2e+04
+Depth=    9553 States=  7.8e+07 Transitions= 1.52e+09 Memory=  3319.795        t= 4.84e+03 R=   2e+04
+Depth=    9553 States=  7.9e+07 Transitions= 1.54e+09 Memory=  3349.190        t= 4.91e+03 R=   2e+04
+Depth=    9553 States=    8e+07 Transitions= 1.56e+09 Memory=  3377.803        t= 4.98e+03 R=   2e+04
+Depth=    9553 States=  8.1e+07 Transitions= 1.58e+09 Memory=  3406.904        t= 5.04e+03 R=   2e+04
+Depth=    9553 States=  8.2e+07 Transitions=  1.6e+09 Memory=  3434.443        t= 5.08e+03 R=   2e+04
+Depth=    9553 States=  8.3e+07 Transitions= 1.61e+09 Memory=  3462.178        t= 5.14e+03 R=   2e+04
+Depth=    9553 States=  8.4e+07 Transitions= 1.63e+09 Memory=  3490.205        t= 5.18e+03 R=   2e+04
+Depth=    9553 States=  8.5e+07 Transitions= 1.65e+09 Memory=  3517.549        t= 5.24e+03 R=   2e+04
+Depth=    9553 States=  8.6e+07 Transitions= 1.67e+09 Memory=  3544.502        t= 5.31e+03 R=   2e+04
+Depth=    9553 States=  8.7e+07 Transitions= 1.69e+09 Memory=  3574.190        t= 5.38e+03 R=   2e+04
+Depth=    9553 States=  8.8e+07 Transitions=  1.7e+09 Memory=  3606.026        t= 5.42e+03 R=   2e+04
+Depth=    9553 States=  8.9e+07 Transitions= 1.72e+09 Memory=  3632.588        t= 5.48e+03 R=   2e+04
+Depth=    9553 States=    9e+07 Transitions= 1.74e+09 Memory=  3662.080        t= 5.53e+03 R=   2e+04
+Depth=    9553 States=  9.1e+07 Transitions= 1.76e+09 Memory=  3689.229        t= 5.59e+03 R=   2e+04
+Depth=    9553 States=  9.2e+07 Transitions= 1.78e+09 Memory=  3716.572        t= 5.65e+03 R=   2e+04
+Depth=    9553 States=  9.3e+07 Transitions=  1.8e+09 Memory=  3743.135        t= 5.72e+03 R=   2e+04
+Depth=    9553 States=  9.4e+07 Transitions= 1.82e+09 Memory=  3771.553        t= 5.8e+03 R=   2e+04
+Depth=    9553 States=  9.5e+07 Transitions= 1.84e+09 Memory=  3801.436        t= 5.86e+03 R=   2e+04
+Depth=    9553 States=  9.6e+07 Transitions= 1.86e+09 Memory=  3827.803        t= 5.91e+03 R=   2e+04
+Depth=    9553 States=  9.7e+07 Transitions= 1.88e+09 Memory=  3857.881        t= 5.99e+03 R=   2e+04
+Depth=    9553 States=  9.8e+07 Transitions=  1.9e+09 Memory=  3886.494        t= 6.03e+03 R=   2e+04
+Depth=    9553 States=  9.9e+07 Transitions= 1.91e+09 Memory=  3914.229        t= 6.08e+03 R=   2e+04
+Depth=    9553 States=    1e+08 Transitions= 1.93e+09 Memory=  3942.549        t= 6.13e+03 R=   2e+04
+Depth=    9553 States= 1.01e+08 Transitions= 1.94e+09 Memory=  3972.236        t= 6.19e+03 R=   2e+04
+Depth=    9553 States= 1.02e+08 Transitions= 1.96e+09 Memory=  4001.729        t= 6.24e+03 R=   2e+04
+Depth=    9553 States= 1.03e+08 Transitions= 1.98e+09 Memory=  4030.049        t= 6.3e+03 R=   2e+04
+Depth=    9553 States= 1.04e+08 Transitions=    2e+09 Memory=  4059.639        t= 6.38e+03 R=   2e+04
+Depth=    9553 States= 1.05e+08 Transitions= 2.02e+09 Memory=  4086.006        t= 6.43e+03 R=   2e+04
+Depth=    9553 States= 1.06e+08 Transitions= 2.04e+09 Memory=  4115.889        t= 6.49e+03 R=   2e+04
+Depth=    9553 States= 1.07e+08 Transitions= 2.06e+09 Memory=  4144.990        t= 6.55e+03 R=   2e+04
+Depth=    9553 States= 1.08e+08 Transitions= 2.07e+09 Memory=  4172.822        t= 6.6e+03 R=   2e+04
+Depth=    9553 States= 1.09e+08 Transitions= 2.09e+09 Memory=  4200.752        t= 6.65e+03 R=   2e+04
+Depth=    9553 States=  1.1e+08 Transitions= 2.11e+09 Memory=  4230.147        t= 6.7e+03 R=   2e+04
+Depth=    9553 States= 1.11e+08 Transitions= 2.12e+09 Memory=  4259.932        t= 6.75e+03 R=   2e+04
+Depth=    9553 States= 1.12e+08 Transitions= 2.14e+09 Memory=  4284.541        t= 6.81e+03 R=   2e+04
+Depth=    9553 States= 1.13e+08 Transitions= 2.16e+09 Memory=  4312.471        t= 6.86e+03 R=   2e+04
+Depth=    9553 States= 1.14e+08 Transitions= 2.17e+09 Memory=  4340.401        t= 6.91e+03 R=   2e+04
+Depth=    9553 States= 1.15e+08 Transitions= 2.19e+09 Memory=  4368.526        t= 6.97e+03 R=   2e+04
+Depth=    9553 States= 1.16e+08 Transitions= 2.21e+09 Memory=  4396.651        t= 7.02e+03 R=   2e+04
+Depth=    9553 States= 1.17e+08 Transitions= 2.23e+09 Memory=  4423.897        t= 7.1e+03 R=   2e+04
+Depth=    9553 States= 1.18e+08 Transitions= 2.27e+09 Memory=  4453.779        t= 7.22e+03 R=   2e+04
+Depth=    9553 States= 1.19e+08 Transitions= 2.29e+09 Memory=  4483.955        t= 7.29e+03 R=   2e+04
+Depth=    9553 States=  1.2e+08 Transitions=  2.3e+09 Memory=  4513.545        t= 7.33e+03 R=   2e+04
+Depth=    9553 States= 1.21e+08 Transitions= 2.32e+09 Memory=  4541.279        t= 7.4e+03 R=   2e+04
+Depth=    9553 States= 1.22e+08 Transitions= 2.34e+09 Memory=  4569.893        t= 7.45e+03 R=   2e+04
+Depth=    9553 States= 1.23e+08 Transitions= 2.36e+09 Memory=  4599.483        t= 7.52e+03 R=   2e+04
+Depth=    9553 States= 1.24e+08 Transitions= 2.38e+09 Memory=  4627.705        t= 7.58e+03 R=   2e+04
+Depth=    9553 States= 1.25e+08 Transitions= 2.41e+09 Memory=  4654.658        t= 7.68e+03 R=   2e+04
+Depth=    9553 States= 1.26e+08 Transitions= 2.43e+09 Memory=  4681.807        t= 7.76e+03 R=   2e+04
+Depth=    9553 States= 1.27e+08 Transitions= 2.45e+09 Memory=  4708.760        t= 7.83e+03 R=   2e+04
+Depth=    9553 States= 1.28e+08 Transitions= 2.48e+09 Memory=  4735.518        t= 7.93e+03 R=   2e+04
+Depth=    9553 States= 1.29e+08 Transitions=  2.5e+09 Memory=  4762.178        t=  8e+03 R=   2e+04
+Depth=    9553 States=  1.3e+08 Transitions= 2.52e+09 Memory=  4790.303        t= 8.07e+03 R=   2e+04
+Depth=    9553 States= 1.31e+08 Transitions= 2.55e+09 Memory=  4817.451        t= 8.15e+03 R=   2e+04
+Depth=    9553 States= 1.32e+08 Transitions= 2.57e+09 Memory=  4845.186        t= 8.22e+03 R=   2e+04
+Depth=    9553 States= 1.33e+08 Transitions= 2.59e+09 Memory=  4872.139        t= 8.3e+03 R=   2e+04
+Depth=    9553 States= 1.34e+08 Transitions= 2.61e+09 Memory=  4900.850        t= 8.37e+03 R=   2e+04
+Depth=    9553 States= 1.35e+08 Transitions= 2.63e+09 Memory=  4927.803        t= 8.43e+03 R=   2e+04
+pan: resizing hashtable to -w28..  done
+Depth=    9553 States= 1.36e+08 Transitions= 2.65e+09 Memory=  6975.803        t= 8.5e+03 R=   2e+04
+Depth=    9553 States= 1.37e+08 Transitions= 2.66e+09 Memory=  6975.803        t= 8.54e+03 R=   2e+04
+Depth=    9553 States= 1.38e+08 Transitions= 2.67e+09 Memory=  6996.799        t= 8.58e+03 R=   2e+04
+Depth=    9553 States= 1.39e+08 Transitions= 2.69e+09 Memory=  7026.779        t= 8.62e+03 R=   2e+04
+Depth=    9553 States=  1.4e+08 Transitions=  2.7e+09 Memory=  7056.467        t= 8.68e+03 R=   2e+04
+Depth=    9553 States= 1.41e+08 Transitions= 2.72e+09 Memory=  7084.787        t= 8.72e+03 R=   2e+04
+Depth=    9553 States= 1.42e+08 Transitions= 2.74e+09 Memory=  7114.084        t= 8.77e+03 R=   2e+04
+Depth=    9553 States= 1.43e+08 Transitions= 2.75e+09 Memory=  7143.088        t= 8.82e+03 R=   2e+04
+Depth=    9553 States= 1.44e+08 Transitions= 2.77e+09 Memory=  7170.920        t= 8.88e+03 R=   2e+04
+Depth=    9553 States= 1.45e+08 Transitions= 2.79e+09 Memory=  7198.166        t= 8.94e+03 R=   2e+04
+Depth=    9553 States= 1.46e+08 Transitions= 2.82e+09 Memory=  7229.514        t= 9.06e+03 R=   2e+04
+Depth=    9553 States= 1.47e+08 Transitions= 2.86e+09 Memory=  7265.158        t= 9.18e+03 R=   2e+04
+Depth=    9553 States= 1.48e+08 Transitions= 2.91e+09 Memory=  7299.143        t= 9.34e+03 R=   2e+04
+Depth=    9553 States= 1.49e+08 Transitions= 2.95e+09 Memory=  7327.170        t= 9.47e+03 R=   2e+04
+Depth=    9553 States=  1.5e+08 Transitions= 2.99e+09 Memory=  7352.561        t= 9.59e+03 R=   2e+04
+Depth=    9553 States= 1.51e+08 Transitions=    3e+09 Memory=  7380.881        t= 9.64e+03 R=   2e+04
+Depth=    9553 States= 1.52e+08 Transitions= 3.02e+09 Memory=  7408.908        t= 9.7e+03 R=   2e+04
+Depth=    9553 States= 1.53e+08 Transitions= 3.05e+09 Memory=  7437.033        t= 9.8e+03 R=   2e+04
+Depth=    9553 States= 1.54e+08 Transitions= 3.08e+09 Memory=  7465.744        t= 9.89e+03 R=   2e+04
+Depth=    9553 States= 1.55e+08 Transitions=  3.1e+09 Memory=  7493.869        t= 9.95e+03 R=   2e+04
+Depth=    9553 States= 1.56e+08 Transitions= 3.11e+09 Memory=  7520.529        t= 9.99e+03 R=   2e+04
+Depth=    9553 States= 1.57e+08 Transitions= 3.13e+09 Memory=  7547.092        t=  1e+04 R=   2e+04
+Depth=    9553 States= 1.58e+08 Transitions= 3.14e+09 Memory=  7577.170        t= 1.01e+04 R=   2e+04
+Depth=    9553 States= 1.59e+08 Transitions= 3.17e+09 Memory=  7604.611        t= 1.02e+04 R=   2e+04
+Depth=    9553 States=  1.6e+08 Transitions= 3.19e+09 Memory=  7631.955        t= 1.02e+04 R=   2e+04
+Depth=    9553 States= 1.61e+08 Transitions= 3.21e+09 Memory=  7659.299        t= 1.03e+04 R=   2e+04
+Depth=    9553 States= 1.62e+08 Transitions= 3.24e+09 Memory=  7686.740        t= 1.04e+04 R=   2e+04
+Depth=    9553 States= 1.63e+08 Transitions= 3.27e+09 Memory=  7713.791        t= 1.05e+04 R=   2e+04
+Depth=    9553 States= 1.64e+08 Transitions= 3.29e+09 Memory=  7743.186        t= 1.06e+04 R=   2e+04
+Depth=    9553 States= 1.65e+08 Transitions= 3.31e+09 Memory=  7771.604        t= 1.06e+04 R=   2e+04
+Depth=    9553 States= 1.66e+08 Transitions= 3.34e+09 Memory=  7797.971        t= 1.07e+04 R=   2e+04
+Depth=    9553 States= 1.67e+08 Transitions= 3.37e+09 Memory=  7825.217        t= 1.08e+04 R=   2e+04
+Depth=    9553 States= 1.68e+08 Transitions= 3.39e+09 Memory=  7853.733        t= 1.09e+04 R=   2e+04
+Depth=    9553 States= 1.69e+08 Transitions= 3.41e+09 Memory=  7880.686        t= 1.09e+04 R=   2e+04
+Depth=    9553 States=  1.7e+08 Transitions= 3.44e+09 Memory=  7909.690        t= 1.1e+04 R=   2e+04
+Depth=    9553 States= 1.71e+08 Transitions= 3.46e+09 Memory=  7939.279        t= 1.11e+04 R=   2e+04
+Depth=    9553 States= 1.72e+08 Transitions= 3.47e+09 Memory=  7964.768        t= 1.11e+04 R=   2e+04
+Depth=    9553 States= 1.73e+08 Transitions=  3.5e+09 Memory=  7994.260        t= 1.12e+04 R=   2e+04
+Depth=    9553 States= 1.74e+08 Transitions= 3.52e+09 Memory=  8023.850        t= 1.13e+04 R=   2e+04
+Depth=    9553 States= 1.75e+08 Transitions= 3.53e+09 Memory=  8051.877        t= 1.13e+04 R=   2e+04
+Depth=    9553 States= 1.76e+08 Transitions= 3.56e+09 Memory=  8080.979        t= 1.14e+04 R=   2e+04
+Depth=    9553 States= 1.77e+08 Transitions= 3.57e+09 Memory=  8107.834        t= 1.15e+04 R=   2e+04
+Depth=    9553 States= 1.78e+08 Transitions= 3.59e+09 Memory=  8137.033        t= 1.15e+04 R=   2e+04
+Depth=    9553 States= 1.79e+08 Transitions= 3.61e+09 Memory=  8165.158        t= 1.16e+04 R=   2e+04
+Depth=    9553 States=  1.8e+08 Transitions= 3.63e+09 Memory=  8194.065        t= 1.16e+04 R=   2e+04
+Depth=    9553 States= 1.81e+08 Transitions= 3.65e+09 Memory=  8222.971        t= 1.17e+04 R=   2e+04
+Depth=    9553 States= 1.82e+08 Transitions= 3.66e+09 Memory=  8251.096        t= 1.17e+04 R=   2e+04
+Depth=    9553 States= 1.83e+08 Transitions= 3.68e+09 Memory=  8279.514        t= 1.18e+04 R=   2e+04
+Depth=    9553 States= 1.84e+08 Transitions= 3.69e+09 Memory=  8307.151        t= 1.18e+04 R=   2e+04
+Depth=    9553 States= 1.85e+08 Transitions= 3.71e+09 Memory=  8334.787        t= 1.19e+04 R=   2e+04
+Depth=    9553 States= 1.86e+08 Transitions= 3.73e+09 Memory=  8361.838        t= 1.2e+04 R=   2e+04
+Depth=    9553 States= 1.87e+08 Transitions= 3.75e+09 Memory=  8392.209        t= 1.2e+04 R=   2e+04
+Depth=    9553 States= 1.88e+08 Transitions= 3.77e+09 Memory=  8423.361        t= 1.21e+04 R=   2e+04
+Depth=    9553 States= 1.89e+08 Transitions= 3.78e+09 Memory=  8449.826        t= 1.21e+04 R=   2e+04
+Depth=    9553 States=  1.9e+08 Transitions=  3.8e+09 Memory=  8479.026        t= 1.22e+04 R=   2e+04
+Depth=    9553 States= 1.91e+08 Transitions= 3.82e+09 Memory=  8506.076        t= 1.22e+04 R=   2e+04
+Depth=    9553 States= 1.92e+08 Transitions= 3.84e+09 Memory=  8533.029        t= 1.23e+04 R=   2e+04
+Depth=    9553 States= 1.93e+08 Transitions= 3.86e+09 Memory=  8563.889        t= 1.24e+04 R=   2e+04
+Depth=    9553 States= 1.94e+08 Transitions= 3.88e+09 Memory=  8590.744        t= 1.24e+04 R=   2e+04
+Depth=    9553 States= 1.95e+08 Transitions=  3.9e+09 Memory=  8620.334        t= 1.25e+04 R=   2e+04
+Depth=    9553 States= 1.96e+08 Transitions= 3.92e+09 Memory=  8649.826        t= 1.25e+04 R=   2e+04
+Depth=    9553 States= 1.97e+08 Transitions= 3.93e+09 Memory=  8676.486        t= 1.26e+04 R=   2e+04
+Depth=    9553 States= 1.98e+08 Transitions= 3.95e+09 Memory=  8705.197        t= 1.26e+04 R=   2e+04
+Depth=    9553 States= 1.99e+08 Transitions= 3.96e+09 Memory=  8734.006        t= 1.27e+04 R=   2e+04
+Depth=    9553 States=    2e+08 Transitions= 3.98e+09 Memory=  8764.279        t= 1.27e+04 R=   2e+04
+Depth=    9553 States= 2.01e+08 Transitions=    4e+09 Memory=  8792.209        t= 1.28e+04 R=   2e+04
+Depth=    9553 States= 2.02e+08 Transitions= 4.03e+09 Memory=  8821.408        t= 1.29e+04 R=   2e+04
+Depth=    9553 States= 2.03e+08 Transitions= 4.04e+09 Memory=  8849.338        t= 1.29e+04 R=   2e+04
+Depth=    9553 States= 2.04e+08 Transitions= 4.06e+09 Memory=  8878.733        t= 1.3e+04 R=   2e+04
+Depth=    9553 States= 2.05e+08 Transitions= 4.08e+09 Memory=  8908.322        t= 1.31e+04 R=   2e+04
+Depth=    9553 States= 2.06e+08 Transitions= 4.09e+09 Memory=  8934.494        t= 1.31e+04 R=   2e+04
+Depth=    9553 States= 2.07e+08 Transitions= 4.11e+09 Memory=  8963.498        t= 1.32e+04 R=   2e+04
+Depth=    9553 States= 2.08e+08 Transitions= 4.13e+09 Memory=  8991.818        t= 1.32e+04 R=   2e+04
+Depth=    9553 States= 2.09e+08 Transitions= 4.14e+09 Memory=  9022.287        t= 1.33e+04 R=   2e+04
+Depth=    9553 States=  2.1e+08 Transitions= 4.16e+09 Memory=  9047.580        t= 1.33e+04 R=   2e+04
+Depth=    9553 States= 2.11e+08 Transitions= 4.18e+09 Memory=  9075.412        t= 1.34e+04 R=   2e+04
+Depth=    9553 States= 2.12e+08 Transitions= 4.19e+09 Memory=  9103.244        t= 1.34e+04 R=   2e+04
+Depth=    9553 States= 2.13e+08 Transitions= 4.21e+09 Memory=  9131.467        t= 1.35e+04 R=   2e+04
+Depth=    9553 States= 2.14e+08 Transitions= 4.22e+09 Memory=  9159.006        t= 1.35e+04 R=   2e+04
+Depth=    9553 States= 2.15e+08 Transitions= 4.26e+09 Memory=  9188.010        t= 1.36e+04 R=   2e+04
+Depth=    9553 States= 2.16e+08 Transitions= 4.29e+09 Memory=  9218.283        t= 1.37e+04 R=   2e+04
+Depth=    9553 States= 2.17e+08 Transitions=  4.3e+09 Memory=  9246.897        t= 1.37e+04 R=   2e+04
+Depth=    9553 States= 2.18e+08 Transitions= 4.32e+09 Memory=  9276.193        t= 1.38e+04 R=   2e+04
+Depth=    9553 States= 2.19e+08 Transitions= 4.33e+09 Memory=  9305.002        t= 1.39e+04 R=   2e+04
+Depth=    9553 States=  2.2e+08 Transitions= 4.35e+09 Memory=  9334.299        t= 1.39e+04 R=   2e+04
+Depth=    9553 States= 2.21e+08 Transitions= 4.37e+09 Memory=  9362.619        t= 1.4e+04 R=   2e+04
+Depth=    9553 States= 2.22e+08 Transitions=  4.4e+09 Memory=  9388.108        t= 1.41e+04 R=   2e+04
+Depth=    9553 States= 2.23e+08 Transitions= 4.43e+09 Memory=  9415.354        t= 1.42e+04 R=   2e+04
+Depth=    9553 States= 2.24e+08 Transitions= 4.45e+09 Memory=  9442.111        t= 1.42e+04 R=   2e+04
+Depth=    9553 States= 2.25e+08 Transitions= 4.48e+09 Memory=  9468.967        t= 1.43e+04 R=   2e+04
+Depth=    9553 States= 2.26e+08 Transitions=  4.5e+09 Memory=  9495.041        t= 1.44e+04 R=   2e+04
+Depth=    9553 States= 2.27e+08 Transitions= 4.52e+09 Memory=  9522.580        t= 1.44e+04 R=   2e+04
+Depth=    9553 States= 2.28e+08 Transitions= 4.54e+09 Memory=  9549.631        t= 1.45e+04 R=   2e+04
+Depth=    9553 States= 2.29e+08 Transitions= 4.56e+09 Memory=  9576.096        t= 1.46e+04 R=   2e+04
+Depth=    9553 States=  2.3e+08 Transitions= 4.59e+09 Memory=  9602.268        t= 1.47e+04 R=   2e+04
+Depth=    9553 States= 2.31e+08 Transitions=  4.6e+09 Memory=  9630.197        t= 1.47e+04 R=   2e+04
+Depth=    9553 States= 2.32e+08 Transitions= 4.63e+09 Memory=  9656.858        t= 1.48e+04 R=   2e+04
+Depth=    9553 States= 2.33e+08 Transitions= 4.66e+09 Memory=  9683.713        t= 1.49e+04 R=   2e+04
+Depth=    9553 States= 2.34e+08 Transitions= 4.68e+09 Memory=  9710.373        t= 1.5e+04 R=   2e+04
+Depth=    9553 States= 2.35e+08 Transitions=  4.7e+09 Memory=  9738.108        t= 1.5e+04 R=   2e+04
+Depth=    9553 States= 2.36e+08 Transitions= 4.72e+09 Memory=  9765.158        t= 1.51e+04 R=   2e+04
+Depth=    9553 States= 2.37e+08 Transitions= 4.74e+09 Memory=  9792.404        t= 1.52e+04 R=   2e+04
+Depth=    9553 States= 2.38e+08 Transitions= 4.77e+09 Memory=  9818.576        t= 1.52e+04 R=   2e+04
+Depth=    9553 States= 2.39e+08 Transitions= 4.79e+09 Memory=  9845.432        t= 1.53e+04 R=   2e+04
+Depth=    9553 States=  2.4e+08 Transitions=  4.8e+09 Memory=  9871.701        t= 1.53e+04 R=   2e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 9553, errors: 0
+1.4728139e+08 states, stored (2.40187e+08 visited)
+4.565323e+09 states, matched
+4.8055096e+09 transitions (= visited+matched)
+7.4787137e+10 atomic steps
+hash conflicts: 1.0275497e+09 (resolved)
+
+Stats on memory usage (in Megabytes):
+16293.183      equivalent memory usage for states (stored*(State-vector + overhead))
+ 7372.383      actual memory usage for states (compression: 45.25%)
+               state-vector as stored = 16 byte + 36 byte overhead
+ 2048.000      memory used for hash table (-w28)
+  457.764      memory used for DFS stack (-m10000000)
+    1.367      memory lost to fragmentation
+ 9876.779      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 592075 4097 3828 2 2 ]
+unreached in proctype urcu_reader
+       line 268, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 276, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 104, "(1)"
+       line 249, "pan.___", state 112, "(1)"
+       line 253, "pan.___", state 124, "(1)"
+       line 257, "pan.___", state 132, "(1)"
+       line 407, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 222, "(1)"
+       line 253, "pan.___", state 242, "(1)"
+       line 257, "pan.___", state 250, "(1)"
+       line 687, "pan.___", state 269, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 407, "pan.___", state 276, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 308, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 322, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 340, "(1)"
+       line 253, "pan.___", state 360, "(1)"
+       line 257, "pan.___", state 368, "(1)"
+       line 407, "pan.___", state 387, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 419, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 433, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 451, "(1)"
+       line 253, "pan.___", state 471, "(1)"
+       line 257, "pan.___", state 479, "(1)"
+       line 407, "pan.___", state 500, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 502, "(1)"
+       line 407, "pan.___", state 503, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 503, "else"
+       line 407, "pan.___", state 506, "(1)"
+       line 411, "pan.___", state 514, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 516, "(1)"
+       line 411, "pan.___", state 517, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 517, "else"
+       line 411, "pan.___", state 520, "(1)"
+       line 411, "pan.___", state 521, "(1)"
+       line 411, "pan.___", state 521, "(1)"
+       line 409, "pan.___", state 526, "((i<1))"
+       line 409, "pan.___", state 526, "((i>=1))"
+       line 416, "pan.___", state 532, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 534, "(1)"
+       line 416, "pan.___", state 535, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 535, "else"
+       line 416, "pan.___", state 538, "(1)"
+       line 416, "pan.___", state 539, "(1)"
+       line 416, "pan.___", state 539, "(1)"
+       line 420, "pan.___", state 546, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 548, "(1)"
+       line 420, "pan.___", state 549, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 549, "else"
+       line 420, "pan.___", state 552, "(1)"
+       line 420, "pan.___", state 553, "(1)"
+       line 420, "pan.___", state 553, "(1)"
+       line 418, "pan.___", state 558, "((i<2))"
+       line 418, "pan.___", state 558, "((i>=2))"
+       line 245, "pan.___", state 564, "(1)"
+       line 249, "pan.___", state 572, "(1)"
+       line 249, "pan.___", state 573, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 573, "else"
+       line 247, "pan.___", state 578, "((i<1))"
+       line 247, "pan.___", state 578, "((i>=1))"
+       line 253, "pan.___", state 584, "(1)"
+       line 253, "pan.___", state 585, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 585, "else"
+       line 257, "pan.___", state 592, "(1)"
+       line 257, "pan.___", state 593, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 593, "else"
+       line 255, "pan.___", state 598, "((i<2))"
+       line 255, "pan.___", state 598, "((i>=2))"
+       line 262, "pan.___", state 602, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 602, "else"
+       line 427, "pan.___", state 604, "(1)"
+       line 427, "pan.___", state 604, "(1)"
+       line 687, "pan.___", state 607, "cached_urcu_active_readers = (tmp+1)"
+       line 687, "pan.___", state 608, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 687, "pan.___", state 609, "(1)"
+       line 407, "pan.___", state 616, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 648, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 662, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 680, "(1)"
+       line 253, "pan.___", state 700, "(1)"
+       line 257, "pan.___", state 708, "(1)"
+       line 407, "pan.___", state 734, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 766, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 780, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 798, "(1)"
+       line 253, "pan.___", state 818, "(1)"
+       line 257, "pan.___", state 826, "(1)"
+       line 407, "pan.___", state 845, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 847, "(1)"
+       line 407, "pan.___", state 848, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 848, "else"
+       line 407, "pan.___", state 851, "(1)"
+       line 411, "pan.___", state 859, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 861, "(1)"
+       line 411, "pan.___", state 862, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 862, "else"
+       line 411, "pan.___", state 865, "(1)"
+       line 411, "pan.___", state 866, "(1)"
+       line 411, "pan.___", state 866, "(1)"
+       line 409, "pan.___", state 871, "((i<1))"
+       line 409, "pan.___", state 871, "((i>=1))"
+       line 416, "pan.___", state 877, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 879, "(1)"
+       line 416, "pan.___", state 880, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 880, "else"
+       line 416, "pan.___", state 883, "(1)"
+       line 416, "pan.___", state 884, "(1)"
+       line 416, "pan.___", state 884, "(1)"
+       line 420, "pan.___", state 891, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 893, "(1)"
+       line 420, "pan.___", state 894, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 894, "else"
+       line 420, "pan.___", state 897, "(1)"
+       line 420, "pan.___", state 898, "(1)"
+       line 420, "pan.___", state 898, "(1)"
+       line 418, "pan.___", state 903, "((i<2))"
+       line 418, "pan.___", state 903, "((i>=2))"
+       line 245, "pan.___", state 909, "(1)"
+       line 249, "pan.___", state 917, "(1)"
+       line 249, "pan.___", state 918, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 918, "else"
+       line 247, "pan.___", state 923, "((i<1))"
+       line 247, "pan.___", state 923, "((i>=1))"
+       line 253, "pan.___", state 929, "(1)"
+       line 253, "pan.___", state 930, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 930, "else"
+       line 257, "pan.___", state 937, "(1)"
+       line 257, "pan.___", state 938, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 938, "else"
+       line 255, "pan.___", state 943, "((i<2))"
+       line 255, "pan.___", state 943, "((i>=2))"
+       line 262, "pan.___", state 947, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 947, "else"
+       line 427, "pan.___", state 949, "(1)"
+       line 427, "pan.___", state 949, "(1)"
+       line 695, "pan.___", state 953, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 407, "pan.___", state 958, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 990, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1004, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1022, "(1)"
+       line 253, "pan.___", state 1042, "(1)"
+       line 257, "pan.___", state 1050, "(1)"
+       line 407, "pan.___", state 1072, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1104, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1118, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1136, "(1)"
+       line 253, "pan.___", state 1156, "(1)"
+       line 257, "pan.___", state 1164, "(1)"
+       line 407, "pan.___", state 1187, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1219, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1233, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1251, "(1)"
+       line 253, "pan.___", state 1271, "(1)"
+       line 257, "pan.___", state 1279, "(1)"
+       line 407, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1330, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1344, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1362, "(1)"
+       line 253, "pan.___", state 1382, "(1)"
+       line 257, "pan.___", state 1390, "(1)"
+       line 407, "pan.___", state 1414, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1446, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1460, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1478, "(1)"
+       line 253, "pan.___", state 1498, "(1)"
+       line 257, "pan.___", state 1506, "(1)"
+       line 407, "pan.___", state 1525, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1557, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1571, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1589, "(1)"
+       line 253, "pan.___", state 1609, "(1)"
+       line 257, "pan.___", state 1617, "(1)"
+       line 407, "pan.___", state 1639, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1671, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1685, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1703, "(1)"
+       line 253, "pan.___", state 1723, "(1)"
+       line 257, "pan.___", state 1731, "(1)"
+       line 734, "pan.___", state 1750, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 407, "pan.___", state 1757, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1789, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1803, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1821, "(1)"
+       line 253, "pan.___", state 1841, "(1)"
+       line 257, "pan.___", state 1849, "(1)"
+       line 407, "pan.___", state 1868, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 1900, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 1914, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1932, "(1)"
+       line 253, "pan.___", state 1952, "(1)"
+       line 257, "pan.___", state 1960, "(1)"
+       line 407, "pan.___", state 1981, "cache_dirty_urcu_gp_ctr = 0"
+       line 407, "pan.___", state 1983, "(1)"
+       line 407, "pan.___", state 1984, "(cache_dirty_urcu_gp_ctr)"
+       line 407, "pan.___", state 1984, "else"
+       line 407, "pan.___", state 1987, "(1)"
+       line 411, "pan.___", state 1995, "cache_dirty_urcu_active_readers = 0"
+       line 411, "pan.___", state 1997, "(1)"
+       line 411, "pan.___", state 1998, "(cache_dirty_urcu_active_readers)"
+       line 411, "pan.___", state 1998, "else"
+       line 411, "pan.___", state 2001, "(1)"
+       line 411, "pan.___", state 2002, "(1)"
+       line 411, "pan.___", state 2002, "(1)"
+       line 409, "pan.___", state 2007, "((i<1))"
+       line 409, "pan.___", state 2007, "((i>=1))"
+       line 416, "pan.___", state 2013, "cache_dirty_rcu_ptr = 0"
+       line 416, "pan.___", state 2015, "(1)"
+       line 416, "pan.___", state 2016, "(cache_dirty_rcu_ptr)"
+       line 416, "pan.___", state 2016, "else"
+       line 416, "pan.___", state 2019, "(1)"
+       line 416, "pan.___", state 2020, "(1)"
+       line 416, "pan.___", state 2020, "(1)"
+       line 420, "pan.___", state 2027, "cache_dirty_rcu_data[i] = 0"
+       line 420, "pan.___", state 2029, "(1)"
+       line 420, "pan.___", state 2030, "(cache_dirty_rcu_data[i])"
+       line 420, "pan.___", state 2030, "else"
+       line 420, "pan.___", state 2033, "(1)"
+       line 420, "pan.___", state 2034, "(1)"
+       line 420, "pan.___", state 2034, "(1)"
+       line 418, "pan.___", state 2039, "((i<2))"
+       line 418, "pan.___", state 2039, "((i>=2))"
+       line 245, "pan.___", state 2045, "(1)"
+       line 249, "pan.___", state 2053, "(1)"
+       line 249, "pan.___", state 2054, "(!(cache_dirty_urcu_active_readers))"
+       line 249, "pan.___", state 2054, "else"
+       line 247, "pan.___", state 2059, "((i<1))"
+       line 247, "pan.___", state 2059, "((i>=1))"
+       line 253, "pan.___", state 2065, "(1)"
+       line 253, "pan.___", state 2066, "(!(cache_dirty_rcu_ptr))"
+       line 253, "pan.___", state 2066, "else"
+       line 257, "pan.___", state 2073, "(1)"
+       line 257, "pan.___", state 2074, "(!(cache_dirty_rcu_data[i]))"
+       line 257, "pan.___", state 2074, "else"
+       line 255, "pan.___", state 2079, "((i<2))"
+       line 255, "pan.___", state 2079, "((i>=2))"
+       line 262, "pan.___", state 2083, "(!(cache_dirty_urcu_gp_ctr))"
+       line 262, "pan.___", state 2083, "else"
+       line 427, "pan.___", state 2085, "(1)"
+       line 427, "pan.___", state 2085, "(1)"
+       line 734, "pan.___", state 2088, "cached_urcu_active_readers = (tmp+1)"
+       line 734, "pan.___", state 2089, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 734, "pan.___", state 2090, "(1)"
+       line 407, "pan.___", state 2097, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2129, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2143, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 2161, "(1)"
+       line 253, "pan.___", state 2181, "(1)"
+       line 257, "pan.___", state 2189, "(1)"
+       line 407, "pan.___", state 2214, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2246, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2260, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 2278, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2306, "(1)"
+       line 407, "pan.___", state 2325, "cache_dirty_urcu_gp_ctr = 0"
+       line 416, "pan.___", state 2357, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 2371, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 2389, "(1)"
+       line 253, "pan.___", state 2409, "(1)"
+       line 257, "pan.___", state 2417, "(1)"
+       line 245, "pan.___", state 2448, "(1)"
+       line 253, "pan.___", state 2468, "(1)"
+       line 257, "pan.___", state 2476, "(1)"
+       line 245, "pan.___", state 2491, "(1)"
+       line 253, "pan.___", state 2511, "(1)"
+       line 257, "pan.___", state 2519, "(1)"
+       line 929, "pan.___", state 2536, "-end-"
+       (221 of 2536 states)
+unreached in proctype urcu_writer
+       line 407, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 245, "pan.___", state 109, "(1)"
+       line 249, "pan.___", state 117, "(1)"
+       line 253, "pan.___", state 129, "(1)"
+       line 268, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 167, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 180, "cache_dirty_rcu_ptr = 0"
+       line 407, "pan.___", state 220, "cache_dirty_urcu_gp_ctr = 0"
+       line 411, "pan.___", state 234, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 252, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 266, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 284, "(1)"
+       line 249, "pan.___", state 292, "(1)"
+       line 253, "pan.___", state 304, "(1)"
+       line 257, "pan.___", state 312, "(1)"
+       line 411, "pan.___", state 347, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 365, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 379, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 405, "(1)"
+       line 253, "pan.___", state 417, "(1)"
+       line 257, "pan.___", state 425, "(1)"
+       line 411, "pan.___", state 468, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 486, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 500, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 526, "(1)"
+       line 253, "pan.___", state 538, "(1)"
+       line 257, "pan.___", state 546, "(1)"
+       line 411, "pan.___", state 579, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 597, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 611, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 637, "(1)"
+       line 253, "pan.___", state 649, "(1)"
+       line 257, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 692, "cache_dirty_urcu_active_readers = 0"
+       line 416, "pan.___", state 710, "cache_dirty_rcu_ptr = 0"
+       line 420, "pan.___", state 724, "cache_dirty_rcu_data[i] = 0"
+       line 249, "pan.___", state 750, "(1)"
+       line 253, "pan.___", state 762, "(1)"
+       line 257, "pan.___", state 770, "(1)"
+       line 268, "pan.___", state 823, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 832, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 847, "(1)"
+       line 280, "pan.___", state 854, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 870, "(1)"
+       line 249, "pan.___", state 878, "(1)"
+       line 253, "pan.___", state 890, "(1)"
+       line 257, "pan.___", state 898, "(1)"
+       line 268, "pan.___", state 929, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 938, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 951, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 960, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 976, "(1)"
+       line 249, "pan.___", state 984, "(1)"
+       line 253, "pan.___", state 996, "(1)"
+       line 257, "pan.___", state 1004, "(1)"
+       line 272, "pan.___", state 1030, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1043, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1052, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1068, "(1)"
+       line 249, "pan.___", state 1076, "(1)"
+       line 253, "pan.___", state 1088, "(1)"
+       line 257, "pan.___", state 1096, "(1)"
+       line 268, "pan.___", state 1127, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1136, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1149, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1158, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1174, "(1)"
+       line 249, "pan.___", state 1182, "(1)"
+       line 253, "pan.___", state 1194, "(1)"
+       line 257, "pan.___", state 1202, "(1)"
+       line 272, "pan.___", state 1228, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1241, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1250, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1266, "(1)"
+       line 249, "pan.___", state 1274, "(1)"
+       line 253, "pan.___", state 1286, "(1)"
+       line 257, "pan.___", state 1294, "(1)"
+       line 268, "pan.___", state 1325, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1334, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1347, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1356, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1372, "(1)"
+       line 249, "pan.___", state 1380, "(1)"
+       line 253, "pan.___", state 1392, "(1)"
+       line 257, "pan.___", state 1400, "(1)"
+       line 272, "pan.___", state 1426, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1439, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1448, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1464, "(1)"
+       line 249, "pan.___", state 1472, "(1)"
+       line 253, "pan.___", state 1484, "(1)"
+       line 257, "pan.___", state 1492, "(1)"
+       line 268, "pan.___", state 1523, "cache_dirty_urcu_gp_ctr = 0"
+       line 272, "pan.___", state 1532, "cache_dirty_urcu_active_readers = 0"
+       line 276, "pan.___", state 1545, "cache_dirty_rcu_ptr = 0"
+       line 280, "pan.___", state 1554, "cache_dirty_rcu_data[i] = 0"
+       line 245, "pan.___", state 1570, "(1)"
+       line 249, "pan.___", state 1578, "(1)"
+       line 253, "pan.___", state 1590, "(1)"
+       line 257, "pan.___", state 1598, "(1)"
+       line 1304, "pan.___", state 1614, "-end-"
+       (103 of 1614 states)
+unreached in proctype :init:
+       (0 of 28 states)
+unreached in proctype :never:
+       line 1369, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 1.54e+04 seconds
+pan: rate 15645.145 states/second
+pan: avg transition delay 3.1947e-06 usec
+cp .input.spin urcu_progress_writer.spin.input
+cp .input.spin.trail urcu_progress_writer.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.spin.input b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer.spin.input
new file mode 100644 (file)
index 0000000..b353239
--- /dev/null
@@ -0,0 +1,1340 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
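+
+/*
+ * Layout note (illustrative, inferred from how these masks are used below):
+ * the low bits covered by RCU_GP_CTR_NEST_MASK hold the read-side nesting
+ * count, while RCU_GP_CTR_BIT carries the grace-period phase bit that the
+ * writer flips between grace periods.
+ */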
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
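+
+/*
+ * Illustrative usage sketch (not part of the original model): each modeled
+ * instruction is a guarded option that fires only once its input tokens are
+ * present and its own output token is still absent, for example:
+ *
+ *   :: CONSUME_TOKENS(proc_urcu_reader, TOKEN_A | TOKEN_B, TOKEN_C) ->
+ *           ooo_mem(i);
+ *           tmp = READ_CACHED_VAR(urcu_gp_ctr);
+ *           PRODUCE_TOKENS(proc_urcu_reader, TOKEN_C);
+ *
+ * TOKEN_A/B/C are placeholder names; the real token bits (READ_PROD_*,
+ * READ_PROC_*, ...) are defined further down.
+ */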
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but the dependency remains when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU, but they can
+ * still be reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
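+
+/*
+ * Hypothetical three-statement illustration of the data dependencies above
+ * (placeholder variables, not taken from the model):
+ *
+ *   a = x;    <- S1
+ *   x = b;    <- S2: WAR with S1 (S1 must read x before S2 overwrites it)
+ *   x = c;    <- S3: WAW with S2 (both write x)
+ *   d = x;    <- S4: RAW with S3 (reads the value S3 wrote)
+ */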
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
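+
+/*
+ * Illustrative lifecycle sketch (not part of the model): a write first dirties
+ * the per-process cached copy and only reaches memory when the variable is
+ * flushed, either by a barrier (smp_wmb/smp_mb) or at random by ooo_mem(), e.g.:
+ *
+ *   WRITE_CACHED_VAR(urcu_gp_ctr, tmp);                  // dirty local copy
+ *   RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());   // may or may not flush
+ *   CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());          // flush if dirty (smp_wmb)
+ */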
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * while waiting for the reader after sending barrier requests,
+                * with the reader servicing them forever without continuing its
+                * own execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+                * We choose to ignore the writer's non-progress caused by the
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
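+
+/*
+ * Illustrative handshake sketch (not part of the model): with REMOTE_BARRIERS,
+ * smp_mb_reader() expands to nothing; the writer promotes its barriers with
+ * smp_mb_send(), which sets reader_barrier[i] and busy-waits for it to clear,
+ * while the reader may (or may not) service the request in smp_mb_recv() by
+ * executing smp_mb() and clearing the flag.
+ */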
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
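+
+/*
+ * Worked example (illustrative): once every bit listed in READ_PROC_ALL_TOKENS
+ * has been produced by one pass through the reader body, the final option in
+ * urcu_one_read() consumes them all, clears READ_PROC_ALL_TOKENS_CLEAR
+ * ((1 << 30) - 1, i.e. every token including the branch bits) and exits the
+ * do loop.
+ */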
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The actual implementation keeps
+                        * the barrier because avoiding it would require a branch whose cost in the
+                        * common case is not justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read with respect to the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, if the mb()s are removed (execution using a signal handler
+        * to promote barrier() to smp_mb()), nothing prevents one loop
+        * iteration from spilling into the next one's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
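+
+/*
+ * Note: (1 << 15) - 1 covers bits 0 through 14, i.e. every writer token above,
+ * including the *_WAIT_LOOP bits that are not part of WRITE_PROC_ALL_TOKENS,
+ * so each writer loop iteration restarts with an empty token set.
+ */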
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note ! currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
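+
+               /*
+                * Note: producing a token in advance, as the NO_WMB, NO_MB and
+                * SINGLE_FLIP cases above do, both inhibits the instruction
+                * that would normally produce it (the token sits in that
+                * instruction's "notbits") and immediately satisfies the
+                * instructions listing it in their "bits", effectively removing
+                * that step from the modeled execution.
+                */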
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.log b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.log
new file mode 100644 (file)
index 0000000..52db020
--- /dev/null
@@ -0,0 +1,465 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer_error.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DCOLLAPSE -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1363)
+depth 7: Claim reached state 9 (line 1368)
+depth 50: Claim reached state 9 (line 1367)
+Depth=    7070 States=    1e+06 Transitions= 1.16e+07 Memory=   492.912        t=   33.7 R=   3e+04
+Depth=    7070 States=    2e+06 Transitions= 2.31e+07 Memory=   521.233        t=     68 R=   3e+04
+Depth=    7070 States=    3e+06 Transitions= 3.43e+07 Memory=   549.358        t=    102 R=   3e+04
+pan: resizing hashtable to -w22..  done
+pan: acceptance cycle (at depth 1558)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+       + Compression
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 80 byte, depth reached 7070, errors: 1
+  1856640 states, stored (3.13554e+06 visited)
+ 32657997 states, matched
+ 35793542 transitions (= visited+matched)
+5.1088012e+08 atomic steps
+hash conflicts:  10885217 (resolved)
+
+Stats on memory usage (in Megabytes):
+  205.393      equivalent memory usage for states (stored*(State-vector + overhead))
+   93.456      actual memory usage for states (compression: 45.50%)
+               state-vector as stored = 17 byte + 36 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  583.115      total actual memory usage
+
+nr of templates: [ globals chans procs ]
+collapse counts: [ 12368 744 254 2 2 ]
+unreached in proctype urcu_reader
+       line 269, "pan.___", state 57, "cache_dirty_urcu_gp_ctr = 0"
+       line 277, "pan.___", state 79, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 88, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 104, "(1)"
+       line 250, "pan.___", state 112, "(1)"
+       line 254, "pan.___", state 124, "(1)"
+       line 258, "pan.___", state 132, "(1)"
+       line 408, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 190, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 204, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 222, "(1)"
+       line 254, "pan.___", state 242, "(1)"
+       line 258, "pan.___", state 250, "(1)"
+       line 688, "pan.___", state 269, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 408, "pan.___", state 276, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 308, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 322, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 340, "(1)"
+       line 254, "pan.___", state 360, "(1)"
+       line 258, "pan.___", state 368, "(1)"
+       line 408, "pan.___", state 387, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 419, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 433, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 451, "(1)"
+       line 254, "pan.___", state 471, "(1)"
+       line 258, "pan.___", state 479, "(1)"
+       line 408, "pan.___", state 500, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 502, "(1)"
+       line 408, "pan.___", state 503, "(cache_dirty_urcu_gp_ctr)"
+       line 408, "pan.___", state 503, "else"
+       line 408, "pan.___", state 506, "(1)"
+       line 412, "pan.___", state 514, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 516, "(1)"
+       line 412, "pan.___", state 517, "(cache_dirty_urcu_active_readers)"
+       line 412, "pan.___", state 517, "else"
+       line 412, "pan.___", state 520, "(1)"
+       line 412, "pan.___", state 521, "(1)"
+       line 412, "pan.___", state 521, "(1)"
+       line 410, "pan.___", state 526, "((i<1))"
+       line 410, "pan.___", state 526, "((i>=1))"
+       line 417, "pan.___", state 532, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 534, "(1)"
+       line 417, "pan.___", state 535, "(cache_dirty_rcu_ptr)"
+       line 417, "pan.___", state 535, "else"
+       line 417, "pan.___", state 538, "(1)"
+       line 417, "pan.___", state 539, "(1)"
+       line 417, "pan.___", state 539, "(1)"
+       line 421, "pan.___", state 546, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 548, "(1)"
+       line 421, "pan.___", state 549, "(cache_dirty_rcu_data[i])"
+       line 421, "pan.___", state 549, "else"
+       line 421, "pan.___", state 552, "(1)"
+       line 421, "pan.___", state 553, "(1)"
+       line 421, "pan.___", state 553, "(1)"
+       line 419, "pan.___", state 558, "((i<2))"
+       line 419, "pan.___", state 558, "((i>=2))"
+       line 246, "pan.___", state 564, "(1)"
+       line 250, "pan.___", state 572, "(1)"
+       line 250, "pan.___", state 573, "(!(cache_dirty_urcu_active_readers))"
+       line 250, "pan.___", state 573, "else"
+       line 248, "pan.___", state 578, "((i<1))"
+       line 248, "pan.___", state 578, "((i>=1))"
+       line 254, "pan.___", state 584, "(1)"
+       line 254, "pan.___", state 585, "(!(cache_dirty_rcu_ptr))"
+       line 254, "pan.___", state 585, "else"
+       line 258, "pan.___", state 592, "(1)"
+       line 258, "pan.___", state 593, "(!(cache_dirty_rcu_data[i]))"
+       line 258, "pan.___", state 593, "else"
+       line 256, "pan.___", state 598, "((i<2))"
+       line 256, "pan.___", state 598, "((i>=2))"
+       line 263, "pan.___", state 602, "(!(cache_dirty_urcu_gp_ctr))"
+       line 263, "pan.___", state 602, "else"
+       line 428, "pan.___", state 604, "(1)"
+       line 428, "pan.___", state 604, "(1)"
+       line 688, "pan.___", state 607, "cached_urcu_active_readers = (tmp+1)"
+       line 688, "pan.___", state 608, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 688, "pan.___", state 609, "(1)"
+       line 408, "pan.___", state 616, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 648, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 662, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 680, "(1)"
+       line 254, "pan.___", state 700, "(1)"
+       line 258, "pan.___", state 708, "(1)"
+       line 408, "pan.___", state 734, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 766, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 780, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 798, "(1)"
+       line 254, "pan.___", state 818, "(1)"
+       line 258, "pan.___", state 826, "(1)"
+       line 408, "pan.___", state 845, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 847, "(1)"
+       line 408, "pan.___", state 848, "(cache_dirty_urcu_gp_ctr)"
+       line 408, "pan.___", state 848, "else"
+       line 408, "pan.___", state 851, "(1)"
+       line 412, "pan.___", state 859, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 861, "(1)"
+       line 412, "pan.___", state 862, "(cache_dirty_urcu_active_readers)"
+       line 412, "pan.___", state 862, "else"
+       line 412, "pan.___", state 865, "(1)"
+       line 412, "pan.___", state 866, "(1)"
+       line 412, "pan.___", state 866, "(1)"
+       line 410, "pan.___", state 871, "((i<1))"
+       line 410, "pan.___", state 871, "((i>=1))"
+       line 417, "pan.___", state 877, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 879, "(1)"
+       line 417, "pan.___", state 880, "(cache_dirty_rcu_ptr)"
+       line 417, "pan.___", state 880, "else"
+       line 417, "pan.___", state 883, "(1)"
+       line 417, "pan.___", state 884, "(1)"
+       line 417, "pan.___", state 884, "(1)"
+       line 421, "pan.___", state 891, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 893, "(1)"
+       line 421, "pan.___", state 894, "(cache_dirty_rcu_data[i])"
+       line 421, "pan.___", state 894, "else"
+       line 421, "pan.___", state 897, "(1)"
+       line 421, "pan.___", state 898, "(1)"
+       line 421, "pan.___", state 898, "(1)"
+       line 419, "pan.___", state 903, "((i<2))"
+       line 419, "pan.___", state 903, "((i>=2))"
+       line 246, "pan.___", state 909, "(1)"
+       line 250, "pan.___", state 917, "(1)"
+       line 250, "pan.___", state 918, "(!(cache_dirty_urcu_active_readers))"
+       line 250, "pan.___", state 918, "else"
+       line 248, "pan.___", state 923, "((i<1))"
+       line 248, "pan.___", state 923, "((i>=1))"
+       line 254, "pan.___", state 929, "(1)"
+       line 254, "pan.___", state 930, "(!(cache_dirty_rcu_ptr))"
+       line 254, "pan.___", state 930, "else"
+       line 258, "pan.___", state 937, "(1)"
+       line 258, "pan.___", state 938, "(!(cache_dirty_rcu_data[i]))"
+       line 258, "pan.___", state 938, "else"
+       line 256, "pan.___", state 943, "((i<2))"
+       line 256, "pan.___", state 943, "((i>=2))"
+       line 263, "pan.___", state 947, "(!(cache_dirty_urcu_gp_ctr))"
+       line 263, "pan.___", state 947, "else"
+       line 428, "pan.___", state 949, "(1)"
+       line 428, "pan.___", state 949, "(1)"
+       line 696, "pan.___", state 953, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 408, "pan.___", state 958, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 990, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1004, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1022, "(1)"
+       line 254, "pan.___", state 1042, "(1)"
+       line 258, "pan.___", state 1050, "(1)"
+       line 408, "pan.___", state 1072, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1104, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1118, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1136, "(1)"
+       line 254, "pan.___", state 1156, "(1)"
+       line 258, "pan.___", state 1164, "(1)"
+       line 408, "pan.___", state 1187, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1219, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1233, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1251, "(1)"
+       line 254, "pan.___", state 1271, "(1)"
+       line 258, "pan.___", state 1279, "(1)"
+       line 408, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1330, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1344, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1362, "(1)"
+       line 254, "pan.___", state 1382, "(1)"
+       line 258, "pan.___", state 1390, "(1)"
+       line 408, "pan.___", state 1414, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1446, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1460, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1478, "(1)"
+       line 254, "pan.___", state 1498, "(1)"
+       line 258, "pan.___", state 1506, "(1)"
+       line 408, "pan.___", state 1525, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1557, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1571, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1589, "(1)"
+       line 254, "pan.___", state 1609, "(1)"
+       line 258, "pan.___", state 1617, "(1)"
+       line 408, "pan.___", state 1639, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1671, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1685, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1703, "(1)"
+       line 254, "pan.___", state 1723, "(1)"
+       line 258, "pan.___", state 1731, "(1)"
+       line 735, "pan.___", state 1750, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 408, "pan.___", state 1757, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1789, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1803, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1821, "(1)"
+       line 254, "pan.___", state 1841, "(1)"
+       line 258, "pan.___", state 1849, "(1)"
+       line 408, "pan.___", state 1868, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 1900, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 1914, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1932, "(1)"
+       line 254, "pan.___", state 1952, "(1)"
+       line 258, "pan.___", state 1960, "(1)"
+       line 408, "pan.___", state 1981, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 1983, "(1)"
+       line 408, "pan.___", state 1984, "(cache_dirty_urcu_gp_ctr)"
+       line 408, "pan.___", state 1984, "else"
+       line 408, "pan.___", state 1987, "(1)"
+       line 412, "pan.___", state 1995, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 1997, "(1)"
+       line 412, "pan.___", state 1998, "(cache_dirty_urcu_active_readers)"
+       line 412, "pan.___", state 1998, "else"
+       line 412, "pan.___", state 2001, "(1)"
+       line 412, "pan.___", state 2002, "(1)"
+       line 412, "pan.___", state 2002, "(1)"
+       line 410, "pan.___", state 2007, "((i<1))"
+       line 410, "pan.___", state 2007, "((i>=1))"
+       line 417, "pan.___", state 2013, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 2015, "(1)"
+       line 417, "pan.___", state 2016, "(cache_dirty_rcu_ptr)"
+       line 417, "pan.___", state 2016, "else"
+       line 417, "pan.___", state 2019, "(1)"
+       line 417, "pan.___", state 2020, "(1)"
+       line 417, "pan.___", state 2020, "(1)"
+       line 421, "pan.___", state 2027, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 2029, "(1)"
+       line 421, "pan.___", state 2030, "(cache_dirty_rcu_data[i])"
+       line 421, "pan.___", state 2030, "else"
+       line 421, "pan.___", state 2033, "(1)"
+       line 421, "pan.___", state 2034, "(1)"
+       line 421, "pan.___", state 2034, "(1)"
+       line 419, "pan.___", state 2039, "((i<2))"
+       line 419, "pan.___", state 2039, "((i>=2))"
+       line 246, "pan.___", state 2045, "(1)"
+       line 250, "pan.___", state 2053, "(1)"
+       line 250, "pan.___", state 2054, "(!(cache_dirty_urcu_active_readers))"
+       line 250, "pan.___", state 2054, "else"
+       line 248, "pan.___", state 2059, "((i<1))"
+       line 248, "pan.___", state 2059, "((i>=1))"
+       line 254, "pan.___", state 2065, "(1)"
+       line 254, "pan.___", state 2066, "(!(cache_dirty_rcu_ptr))"
+       line 254, "pan.___", state 2066, "else"
+       line 258, "pan.___", state 2073, "(1)"
+       line 258, "pan.___", state 2074, "(!(cache_dirty_rcu_data[i]))"
+       line 258, "pan.___", state 2074, "else"
+       line 256, "pan.___", state 2079, "((i<2))"
+       line 256, "pan.___", state 2079, "((i>=2))"
+       line 263, "pan.___", state 2083, "(!(cache_dirty_urcu_gp_ctr))"
+       line 263, "pan.___", state 2083, "else"
+       line 428, "pan.___", state 2085, "(1)"
+       line 428, "pan.___", state 2085, "(1)"
+       line 735, "pan.___", state 2088, "cached_urcu_active_readers = (tmp+1)"
+       line 735, "pan.___", state 2089, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 735, "pan.___", state 2090, "(1)"
+       line 408, "pan.___", state 2097, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 2129, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 2143, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 2161, "(1)"
+       line 254, "pan.___", state 2181, "(1)"
+       line 258, "pan.___", state 2189, "(1)"
+       line 408, "pan.___", state 2214, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 2246, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 2260, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 2278, "(1)"
+       line 254, "pan.___", state 2298, "(1)"
+       line 258, "pan.___", state 2306, "(1)"
+       line 408, "pan.___", state 2325, "cache_dirty_urcu_gp_ctr = 0"
+       line 417, "pan.___", state 2357, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 2371, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 2389, "(1)"
+       line 254, "pan.___", state 2409, "(1)"
+       line 258, "pan.___", state 2417, "(1)"
+       line 246, "pan.___", state 2448, "(1)"
+       line 254, "pan.___", state 2468, "(1)"
+       line 258, "pan.___", state 2476, "(1)"
+       line 246, "pan.___", state 2491, "(1)"
+       line 254, "pan.___", state 2511, "(1)"
+       line 258, "pan.___", state 2519, "(1)"
+       line 930, "pan.___", state 2536, "-end-"
+       (221 of 2536 states)
+unreached in proctype urcu_writer
+       line 408, "pan.___", state 45, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 59, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 77, "cache_dirty_rcu_ptr = 0"
+       line 246, "pan.___", state 109, "(1)"
+       line 250, "pan.___", state 117, "(1)"
+       line 254, "pan.___", state 129, "(1)"
+       line 269, "pan.___", state 158, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 167, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 180, "cache_dirty_rcu_ptr = 0"
+       line 408, "pan.___", state 220, "cache_dirty_urcu_gp_ctr = 0"
+       line 412, "pan.___", state 234, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 252, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 266, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 284, "(1)"
+       line 250, "pan.___", state 292, "(1)"
+       line 254, "pan.___", state 304, "(1)"
+       line 258, "pan.___", state 312, "(1)"
+       line 412, "pan.___", state 347, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 365, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 379, "cache_dirty_rcu_data[i] = 0"
+       line 250, "pan.___", state 405, "(1)"
+       line 254, "pan.___", state 417, "(1)"
+       line 258, "pan.___", state 425, "(1)"
+       line 408, "pan.___", state 450, "cache_dirty_urcu_gp_ctr = 0"
+       line 408, "pan.___", state 452, "(1)"
+       line 408, "pan.___", state 453, "(cache_dirty_urcu_gp_ctr)"
+       line 408, "pan.___", state 453, "else"
+       line 408, "pan.___", state 456, "(1)"
+       line 412, "pan.___", state 464, "cache_dirty_urcu_active_readers = 0"
+       line 412, "pan.___", state 466, "(1)"
+       line 412, "pan.___", state 467, "(cache_dirty_urcu_active_readers)"
+       line 412, "pan.___", state 467, "else"
+       line 412, "pan.___", state 470, "(1)"
+       line 412, "pan.___", state 471, "(1)"
+       line 412, "pan.___", state 471, "(1)"
+       line 410, "pan.___", state 476, "((i<1))"
+       line 410, "pan.___", state 476, "((i>=1))"
+       line 417, "pan.___", state 482, "cache_dirty_rcu_ptr = 0"
+       line 417, "pan.___", state 484, "(1)"
+       line 417, "pan.___", state 485, "(cache_dirty_rcu_ptr)"
+       line 417, "pan.___", state 485, "else"
+       line 417, "pan.___", state 488, "(1)"
+       line 417, "pan.___", state 489, "(1)"
+       line 417, "pan.___", state 489, "(1)"
+       line 421, "pan.___", state 496, "cache_dirty_rcu_data[i] = 0"
+       line 421, "pan.___", state 498, "(1)"
+       line 421, "pan.___", state 499, "(cache_dirty_rcu_data[i])"
+       line 421, "pan.___", state 499, "else"
+       line 421, "pan.___", state 502, "(1)"
+       line 421, "pan.___", state 503, "(1)"
+       line 421, "pan.___", state 503, "(1)"
+       line 419, "pan.___", state 508, "((i<2))"
+       line 419, "pan.___", state 508, "((i>=2))"
+       line 246, "pan.___", state 514, "(1)"
+       line 250, "pan.___", state 522, "(1)"
+       line 250, "pan.___", state 523, "(!(cache_dirty_urcu_active_readers))"
+       line 250, "pan.___", state 523, "else"
+       line 248, "pan.___", state 528, "((i<1))"
+       line 248, "pan.___", state 528, "((i>=1))"
+       line 254, "pan.___", state 534, "(1)"
+       line 254, "pan.___", state 535, "(!(cache_dirty_rcu_ptr))"
+       line 254, "pan.___", state 535, "else"
+       line 258, "pan.___", state 542, "(1)"
+       line 258, "pan.___", state 543, "(!(cache_dirty_rcu_data[i]))"
+       line 258, "pan.___", state 543, "else"
+       line 263, "pan.___", state 552, "(!(cache_dirty_urcu_gp_ctr))"
+       line 263, "pan.___", state 552, "else"
+       line 428, "pan.___", state 554, "(1)"
+       line 428, "pan.___", state 554, "(1)"
+       line 412, "pan.___", state 574, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 592, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 606, "cache_dirty_rcu_data[i] = 0"
+       line 250, "pan.___", state 632, "(1)"
+       line 254, "pan.___", state 644, "(1)"
+       line 258, "pan.___", state 652, "(1)"
+       line 412, "pan.___", state 685, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 703, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 717, "cache_dirty_rcu_data[i] = 0"
+       line 250, "pan.___", state 743, "(1)"
+       line 254, "pan.___", state 755, "(1)"
+       line 258, "pan.___", state 763, "(1)"
+       line 412, "pan.___", state 798, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 816, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 830, "cache_dirty_rcu_data[i] = 0"
+       line 250, "pan.___", state 856, "(1)"
+       line 254, "pan.___", state 868, "(1)"
+       line 258, "pan.___", state 876, "(1)"
+       line 412, "pan.___", state 914, "cache_dirty_urcu_active_readers = 0"
+       line 417, "pan.___", state 932, "cache_dirty_rcu_ptr = 0"
+       line 421, "pan.___", state 946, "cache_dirty_rcu_data[i] = 0"
+       line 250, "pan.___", state 972, "(1)"
+       line 254, "pan.___", state 984, "(1)"
+       line 258, "pan.___", state 992, "(1)"
+       line 269, "pan.___", state 1036, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 1045, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1060, "(1)"
+       line 281, "pan.___", state 1067, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1083, "(1)"
+       line 250, "pan.___", state 1091, "(1)"
+       line 254, "pan.___", state 1103, "(1)"
+       line 258, "pan.___", state 1111, "(1)"
+       line 269, "pan.___", state 1142, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 1151, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1164, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 1173, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1189, "(1)"
+       line 250, "pan.___", state 1197, "(1)"
+       line 254, "pan.___", state 1209, "(1)"
+       line 258, "pan.___", state 1217, "(1)"
+       line 273, "pan.___", state 1243, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1256, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 1265, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1281, "(1)"
+       line 250, "pan.___", state 1289, "(1)"
+       line 254, "pan.___", state 1301, "(1)"
+       line 258, "pan.___", state 1309, "(1)"
+       line 269, "pan.___", state 1340, "cache_dirty_urcu_gp_ctr = 0"
+       line 273, "pan.___", state 1349, "cache_dirty_urcu_active_readers = 0"
+       line 277, "pan.___", state 1362, "cache_dirty_rcu_ptr = 0"
+       line 281, "pan.___", state 1371, "cache_dirty_rcu_data[i] = 0"
+       line 246, "pan.___", state 1387, "(1)"
+       line 250, "pan.___", state 1395, "(1)"
+       line 254, "pan.___", state 1407, "(1)"
+       line 258, "pan.___", state 1415, "(1)"
+       line 1305, "pan.___", state 1431, "-end-"
+       (110 of 1431 states)
+unreached in proctype :init:
+       (0 of 28 states)
+unreached in proctype :never:
+       line 1370, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 106 seconds
+pan: rate 29466.638 states/second
+pan: avg transition delay 2.9729e-06 usec
+cp .input.spin urcu_progress_writer_error.spin.input
+cp .input.spin.trail urcu_progress_writer_error.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi-compress'
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.spin.input b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.spin.input
new file mode 100644 (file)
index 0000000..6cccb27
--- /dev/null
@@ -0,0 +1,1341 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
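+
+/*
+ * Sketch of the counter encoding assumed by this model: the low 7 bits
+ * (RCU_GP_CTR_NEST_MASK) of urcu_active_readers hold the read-side critical
+ * section nesting count, while bit 7 (RCU_GP_CTR_BIT) carries the grace-period
+ * parity snapshot that the writer compares against the global parity.
+ */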
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits act as triggers to execute the instructions that
+ * have those variables as input. Leaving bits active inhibits instruction
+ * execution. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
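+
+/*
+ * Illustrative sketch (not part of the model): an instruction is expressed as
+ * an option that consumes its input tokens and produces its output token,
+ * for example:
+ *
+ *     :: CONSUME_TOKENS(proc_urcu_writer, WRITE_DATA, WRITE_PROC_WMB) ->
+ *             smp_wmb(i);
+ *             PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+ *
+ * Such an option can only run once WRITE_DATA has been produced, and runs at
+ * most once, because its own output token appears in the "notbits" argument.
+ */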
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read from and then write to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can make this dependency unnecessary, but the dependency can be required
+ * when writing multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions that depend on the speculative instruction's result
+ * but have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a depencency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core-synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
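+
+/*
+ * This is presumably why the reader's second pass through the read-side
+ * critical section is modeled with explicitly unrolled tokens
+ * (READ_LOCK_OUT_UNROLL, READ_PROC_READ_GEN_UNROLL, ...) rather than a loop.
+ */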
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;
+
+#define DECLARE_PROC_CACHED_VAR(type, x)\
+       type cached_##x;                \
+       bit cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v)          \
+       mem_##x = v;
+
+#define INIT_PROC_CACHED_VAR(x, v)     \
+       cache_dirty_##x = 0;            \
+       cached_##x = v;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x)
+
+#define READ_CACHED_VAR(x)     (cached_##x)
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x = v;                         \
+               cache_dirty_##x = 1;                    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x;                   \
+               cache_dirty_##x = 0;                    \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x = mem_##x;   \
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate the cache line to/from memory (and thus other caches).
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
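+
+/*
+ * The RANDOM_CACHE_* macros nondeterministically either propagate the cache
+ * line (write-back or refresh) or leave it untouched, letting the verifier
+ * explore the out-of-order memory effects this model is meant to capture.
+ */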
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One instruction at
+ * a time (in separate verification runs), we make a different instruction
+ * service the barrier signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore the cycle in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and the
+                * reader keeps servicing them without ever continuing its own
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
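+/*
+ * Illustrative sketch (not part of the verified model, hence guarded by
+ * #if 0): the signal-based barrier handshake used by smp_mb_send() and
+ * smp_mb_recv() above, reduced to a single request bit. The name "req"
+ * and the sketch_* proctypes exist only for this example.
+ */
+#if 0
+bit req;
+
+active proctype sketch_mb_sender()
+{
+       req = 1;                /* ask the remote side to issue a barrier */
+       do
+       :: (req == 1) -> skip   /* busy-wait until the request is serviced */
+       :: (req == 0) -> break
+       od;
+}
+
+active proctype sketch_mb_receiver()
+{
+       do
+       :: (req == 1) ->
+               /* the real model issues smp_mb() here before acknowledging */
+               req = 0
+       :: 1 -> skip            /* the receiver may also ignore requests, as above */
+       od;
+}
+#endif
+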
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader:
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
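+/*
+ * Illustrative sketch (not part of the verified model, hence guarded by
+ * #if 0): the token-passing scheme used by urcu_one_read() below. Each
+ * "instruction" fires once all the tokens it depends on have been produced
+ * and its own token has not been produced yet; the enclosing repetition
+ * nondeterministically picks any enabled instruction, which is what lets
+ * independent instructions execute out of order. The TOK_* and SK_* names
+ * are local stand-ins for this example only; the model's own
+ * PRODUCE_TOKENS/CONSUME_TOKENS/CLEAR_TOKENS macros are defined earlier in
+ * this file.
+ */
+#if 0
+#define TOK_A                  (1 << 0)
+#define TOK_B                  (1 << 1)
+#define TOK_C                  (1 << 2)
+
+#define SK_PRODUCE(state, tok) state = state | (tok)
+#define SK_READY(state, deps, tok)     \
+       ((((state) & (deps)) == (deps)) && !((state) & (tok)))
+
+active proctype sketch_tokens()
+{
+       int state = 0;
+
+       do
+       :: SK_READY(state, 0, TOK_A) ->         /* A: no dependency */
+               SK_PRODUCE(state, TOK_A);
+       :: SK_READY(state, 0, TOK_B) ->         /* B: no dependency; may fire before or after A */
+               SK_PRODUCE(state, TOK_B);
+       :: SK_READY(state, TOK_A | TOK_B, TOK_C) ->     /* C: depends on both A and B */
+               SK_PRODUCE(state, TOK_C);
+       :: ((state & (TOK_A | TOK_B | TOK_C)) == (TOK_A | TOK_B | TOK_C)) ->
+               break
+       od;
+       assert(state == (TOK_A | TOK_B | TOK_C));
+}
+#endif
+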
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier may only be serviced at a
+                * point where the already-executed instructions form a prefix
+                * of the program order (checked by the disjunction below).
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read-unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier unconditionally because the performance impact of adding a
+                        * branch to skip it in the common case does not justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling the loop: second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT: RAW dependency */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, when the mb()s are removed (execution using the signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one
+        * loop's execution from spilling onto the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note: currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could believe the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer:
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local copy of the current parity so
+                                * we do not add non-existent dependencies on the
+                                * global GP update. Needed to test the single-flip case.
+                                */
+
+       /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_gp_ctr);
+       /* Note: currently only one reader */
+       DECLARE_PROC_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+       /* RCU data */
+       DECLARE_PROC_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+       /* RCU pointer */
+#if (SLAB_SIZE == 2)
+       DECLARE_PROC_CACHED_VAR(bit, rcu_ptr);
+#else
+       DECLARE_PROC_CACHED_VAR(byte, rcu_ptr);
+#endif
+
+       atomic {
+               INIT_PROC_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_PROC_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_PROC_CACHED_VAR(urcu_active_readers[i], 0);
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_PROC_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_PROC_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * which was done as a prefetch. Note that all
+                        * instructions with side effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the validation
+                * checks that the data entry read is poisoned, it is OK if we
+                * do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, with weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* init comes after the readers and writers so that the pid numbering stays correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1);
+               INIT_CACHED_VAR(rcu_ptr, 0);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.spin.input.trail b/formal-model/urcu-controldataflow-intel-ipi-compress/urcu_progress_writer_error.spin.input.trail
new file mode 100644 (file)
index 0000000..e9b06b3
--- /dev/null
@@ -0,0 +1,1699 @@
+-2:3:-2
+-4:-4:-4
+1:0:3997
+2:2:2536
+3:2:2541
+4:2:2545
+5:2:2553
+6:2:2557
+7:2:2561
+8:0:3997
+9:1:0
+10:1:5
+11:1:9
+12:1:17
+13:1:21
+14:1:25
+15:0:3997
+16:3:3967
+17:3:3970
+18:3:3977
+19:3:3984
+20:3:3987
+21:3:3991
+22:3:3992
+23:0:3997
+24:3:3994
+25:0:3997
+26:2:2565
+27:0:3997
+28:2:2571
+29:0:3997
+30:2:2572
+31:0:3997
+32:2:2573
+33:0:3997
+34:2:2574
+35:0:3997
+36:2:2575
+37:0:3997
+38:2:2576
+39:2:2577
+40:2:2581
+41:2:2582
+42:2:2590
+43:2:2591
+44:2:2595
+45:2:2596
+46:2:2604
+47:2:2609
+48:2:2613
+49:2:2614
+50:2:2622
+51:2:2623
+52:2:2627
+53:2:2628
+54:2:2622
+55:2:2623
+56:2:2627
+57:2:2628
+58:2:2636
+59:2:2641
+60:2:2648
+61:2:2649
+62:2:2656
+63:2:2661
+64:2:2668
+65:2:2669
+66:2:2668
+67:2:2669
+68:2:2676
+69:2:2686
+70:0:3997
+71:2:2575
+72:0:3997
+73:2:2690
+74:2:2694
+75:2:2695
+76:2:2699
+77:2:2703
+78:2:2704
+79:2:2708
+80:2:2716
+81:2:2717
+82:2:2721
+83:2:2725
+84:2:2726
+85:2:2721
+86:2:2722
+87:2:2730
+88:0:3997
+89:2:2575
+90:0:3997
+91:2:2738
+92:2:2739
+93:2:2740
+94:0:3997
+95:2:2575
+96:0:3997
+97:2:2745
+98:0:3997
+99:2:3572
+100:2:3573
+101:2:3577
+102:2:3581
+103:2:3582
+104:2:3586
+105:2:3591
+106:2:3599
+107:2:3603
+108:2:3604
+109:2:3599
+110:2:3603
+111:2:3604
+112:2:3608
+113:2:3615
+114:2:3622
+115:2:3623
+116:2:3630
+117:2:3635
+118:2:3642
+119:2:3643
+120:2:3642
+121:2:3643
+122:2:3650
+123:2:3654
+124:0:3997
+125:2:3659
+126:0:3997
+127:2:3660
+128:0:3997
+129:2:3661
+130:0:3997
+131:2:3662
+132:0:3997
+133:1:29
+134:0:3997
+135:1:35
+136:0:3997
+137:1:36
+138:0:3997
+139:2:3663
+140:0:3997
+141:1:37
+142:0:3997
+143:2:3662
+144:0:3997
+145:1:38
+146:0:3997
+147:2:3663
+148:0:3997
+149:1:39
+150:0:3997
+151:2:3662
+152:0:3997
+153:1:40
+154:0:3997
+155:2:3663
+156:0:3997
+157:1:41
+158:0:3997
+159:1:42
+160:0:3997
+161:1:43
+162:0:3997
+163:2:3662
+164:0:3997
+165:1:44
+166:0:3997
+167:2:3663
+168:0:3997
+169:1:53
+170:0:3997
+171:2:3662
+172:0:3997
+173:1:57
+174:1:58
+175:1:62
+176:1:66
+177:1:67
+178:1:71
+179:1:79
+180:1:80
+181:1:84
+182:1:88
+183:1:89
+184:1:84
+185:1:88
+186:1:89
+187:1:93
+188:1:100
+189:1:107
+190:1:108
+191:1:115
+192:1:120
+193:1:127
+194:1:128
+195:1:127
+196:1:128
+197:1:135
+198:1:139
+199:0:3997
+200:2:3663
+201:0:3997
+202:1:144
+203:0:3997
+204:2:3664
+205:0:3997
+206:2:3669
+207:0:3997
+208:2:3670
+209:0:3997
+210:2:3678
+211:2:3679
+212:2:3683
+213:2:3687
+214:2:3688
+215:2:3692
+216:2:3700
+217:2:3701
+218:2:3705
+219:2:3709
+220:2:3710
+221:2:3705
+222:2:3709
+223:2:3710
+224:2:3714
+225:2:3721
+226:2:3728
+227:2:3729
+228:2:3736
+229:2:3741
+230:2:3748
+231:2:3749
+232:2:3748
+233:2:3749
+234:2:3756
+235:2:3760
+236:0:3997
+237:2:2747
+238:2:3553
+239:0:3997
+240:2:2575
+241:0:3997
+242:2:2748
+243:0:3997
+244:2:2575
+245:0:3997
+246:2:2751
+247:2:2752
+248:2:2756
+249:2:2757
+250:2:2765
+251:2:2766
+252:2:2770
+253:2:2771
+254:2:2779
+255:2:2784
+256:2:2788
+257:2:2789
+258:2:2797
+259:2:2798
+260:2:2802
+261:2:2803
+262:2:2797
+263:2:2798
+264:2:2802
+265:2:2803
+266:2:2811
+267:2:2816
+268:2:2823
+269:2:2824
+270:2:2831
+271:2:2836
+272:2:2843
+273:2:2844
+274:2:2843
+275:2:2844
+276:2:2851
+277:2:2860
+278:0:3997
+279:2:2575
+280:0:3997
+281:2:2864
+282:2:2865
+283:2:2866
+284:2:2878
+285:2:2879
+286:2:2883
+287:2:2884
+288:2:2892
+289:2:2897
+290:2:2901
+291:2:2902
+292:2:2910
+293:2:2911
+294:2:2915
+295:2:2916
+296:2:2910
+297:2:2911
+298:2:2915
+299:2:2916
+300:2:2924
+301:2:2929
+302:2:2936
+303:2:2937
+304:2:2944
+305:2:2949
+306:2:2956
+307:2:2957
+308:2:2956
+309:2:2957
+310:2:2964
+311:2:2977
+312:2:2978
+313:0:3997
+314:2:2575
+315:0:3997
+316:2:3091
+317:2:3092
+318:2:3096
+319:2:3097
+320:2:3105
+321:2:3106
+322:2:3110
+323:2:3111
+324:2:3119
+325:2:3124
+326:2:3128
+327:2:3129
+328:2:3137
+329:2:3138
+330:2:3142
+331:2:3143
+332:2:3137
+333:2:3138
+334:2:3142
+335:2:3143
+336:2:3151
+337:2:3156
+338:2:3163
+339:2:3164
+340:2:3171
+341:2:3176
+342:2:3183
+343:2:3184
+344:2:3183
+345:2:3184
+346:2:3191
+347:0:3997
+348:2:2575
+349:0:3997
+350:2:3202
+351:2:3203
+352:2:3207
+353:2:3208
+354:2:3216
+355:2:3217
+356:2:3221
+357:2:3222
+358:2:3230
+359:2:3235
+360:2:3239
+361:2:3240
+362:2:3248
+363:2:3249
+364:2:3253
+365:2:3254
+366:2:3248
+367:2:3249
+368:2:3253
+369:2:3254
+370:2:3262
+371:2:3267
+372:2:3274
+373:2:3275
+374:2:3282
+375:2:3287
+376:2:3294
+377:2:3295
+378:2:3294
+379:2:3295
+380:2:3302
+381:2:3311
+382:0:3997
+383:2:2575
+384:0:3997
+385:2:3315
+386:2:3316
+387:2:3317
+388:2:3329
+389:2:3330
+390:2:3334
+391:2:3335
+392:2:3343
+393:2:3348
+394:2:3352
+395:2:3353
+396:2:3361
+397:2:3362
+398:2:3366
+399:2:3367
+400:2:3361
+401:2:3362
+402:2:3366
+403:2:3367
+404:2:3375
+405:2:3380
+406:2:3387
+407:2:3388
+408:2:3395
+409:2:3400
+410:2:3407
+411:2:3408
+412:2:3407
+413:2:3408
+414:2:3415
+415:2:3427
+416:2:3428
+417:0:3997
+418:2:2575
+419:0:3997
+420:2:3541
+421:0:3997
+422:2:3770
+423:2:3771
+424:2:3775
+425:2:3779
+426:2:3780
+427:2:3784
+428:2:3792
+429:2:3793
+430:2:3797
+431:2:3801
+432:2:3802
+433:2:3797
+434:2:3801
+435:2:3802
+436:2:3806
+437:2:3813
+438:2:3820
+439:2:3821
+440:2:3828
+441:2:3833
+442:2:3840
+443:2:3841
+444:2:3840
+445:2:3841
+446:2:3848
+447:2:3852
+448:0:3997
+449:2:3857
+450:0:3997
+451:2:3858
+452:0:3997
+453:2:3859
+454:0:3997
+455:2:3860
+456:0:3997
+457:1:53
+458:0:3997
+459:2:3861
+460:0:3997
+461:1:57
+462:1:58
+463:1:62
+464:1:66
+465:1:67
+466:1:71
+467:1:79
+468:1:80
+469:1:84
+470:1:88
+471:1:89
+472:1:84
+473:1:88
+474:1:89
+475:1:93
+476:1:100
+477:1:107
+478:1:108
+479:1:115
+480:1:120
+481:1:127
+482:1:128
+483:1:127
+484:1:128
+485:1:135
+486:1:139
+487:0:3997
+488:2:3860
+489:0:3997
+490:1:144
+491:0:3997
+492:2:3861
+493:0:3997
+494:2:3862
+495:0:3997
+496:2:3867
+497:0:3997
+498:2:3868
+499:0:3997
+500:2:3876
+501:2:3877
+502:2:3881
+503:2:3885
+504:2:3886
+505:2:3890
+506:2:3898
+507:2:3899
+508:2:3903
+509:2:3907
+510:2:3908
+511:2:3903
+512:2:3907
+513:2:3908
+514:2:3912
+515:2:3919
+516:2:3926
+517:2:3927
+518:2:3934
+519:2:3939
+520:2:3946
+521:2:3947
+522:2:3946
+523:2:3947
+524:2:3954
+525:2:3958
+526:0:3997
+527:2:3543
+528:2:3553
+529:0:3997
+530:2:2575
+531:0:3997
+532:2:3544
+533:2:3545
+534:0:3997
+535:2:2575
+536:0:3997
+537:2:3549
+538:0:3997
+539:2:3557
+540:0:3997
+541:2:2572
+542:0:3997
+543:2:2573
+544:0:3997
+545:2:2574
+546:0:3997
+547:2:2575
+548:0:3997
+549:2:2576
+550:2:2577
+551:2:2581
+552:2:2582
+553:2:2590
+554:2:2591
+555:2:2595
+556:2:2596
+557:2:2604
+558:2:2609
+559:2:2613
+560:2:2614
+561:2:2622
+562:2:2623
+563:2:2624
+564:2:2622
+565:2:2623
+566:2:2627
+567:2:2628
+568:2:2636
+569:2:2641
+570:2:2648
+571:2:2649
+572:2:2656
+573:2:2661
+574:2:2668
+575:2:2669
+576:2:2668
+577:2:2669
+578:2:2676
+579:2:2686
+580:0:3997
+581:2:2575
+582:0:3997
+583:2:2690
+584:2:2694
+585:2:2695
+586:2:2699
+587:2:2703
+588:2:2704
+589:2:2708
+590:2:2716
+591:2:2717
+592:2:2721
+593:2:2722
+594:2:2721
+595:2:2725
+596:2:2726
+597:2:2730
+598:0:3997
+599:2:2575
+600:0:3997
+601:2:2738
+602:2:2739
+603:2:2740
+604:0:3997
+605:2:2575
+606:0:3997
+607:2:2745
+608:0:3997
+609:2:3572
+610:2:3573
+611:2:3577
+612:2:3581
+613:2:3582
+614:2:3586
+615:2:3591
+616:2:3599
+617:2:3603
+618:2:3604
+619:2:3599
+620:2:3603
+621:2:3604
+622:2:3608
+623:2:3615
+624:2:3622
+625:2:3623
+626:2:3630
+627:2:3635
+628:2:3642
+629:2:3643
+630:2:3642
+631:2:3643
+632:2:3650
+633:2:3654
+634:0:3997
+635:2:3659
+636:0:3997
+637:2:3660
+638:0:3997
+639:2:3661
+640:0:3997
+641:2:3662
+642:0:3997
+643:1:53
+644:0:3997
+645:2:3663
+646:0:3997
+647:1:57
+648:1:58
+649:1:62
+650:1:66
+651:1:67
+652:1:71
+653:1:79
+654:1:80
+655:1:84
+656:1:88
+657:1:89
+658:1:84
+659:1:88
+660:1:89
+661:1:93
+662:1:100
+663:1:107
+664:1:108
+665:1:115
+666:1:120
+667:1:127
+668:1:128
+669:1:127
+670:1:128
+671:1:135
+672:1:139
+673:0:3997
+674:2:3662
+675:0:3997
+676:1:144
+677:0:3997
+678:2:3663
+679:0:3997
+680:2:3664
+681:0:3997
+682:2:3669
+683:0:3997
+684:2:3670
+685:0:3997
+686:2:3678
+687:2:3679
+688:2:3683
+689:2:3687
+690:2:3688
+691:2:3692
+692:2:3700
+693:2:3701
+694:2:3705
+695:2:3709
+696:2:3710
+697:2:3705
+698:2:3709
+699:2:3710
+700:2:3714
+701:2:3721
+702:2:3728
+703:2:3729
+704:2:3736
+705:2:3741
+706:2:3748
+707:2:3749
+708:2:3748
+709:2:3749
+710:2:3756
+711:2:3760
+712:0:3997
+713:2:2747
+714:2:3553
+715:0:3997
+716:2:2575
+717:0:3997
+718:2:2748
+719:0:3997
+720:2:2575
+721:0:3997
+722:2:2751
+723:2:2752
+724:2:2756
+725:2:2757
+726:2:2765
+727:2:2766
+728:2:2770
+729:2:2771
+730:2:2779
+731:2:2784
+732:2:2788
+733:2:2789
+734:2:2797
+735:2:2798
+736:2:2802
+737:2:2803
+738:2:2797
+739:2:2798
+740:2:2802
+741:2:2803
+742:2:2811
+743:2:2816
+744:2:2823
+745:2:2824
+746:2:2831
+747:2:2836
+748:2:2843
+749:2:2844
+750:2:2843
+751:2:2844
+752:2:2851
+753:2:2860
+754:0:3997
+755:2:2575
+756:0:3997
+757:2:2864
+758:2:2865
+759:2:2866
+760:2:2878
+761:2:2879
+762:2:2883
+763:2:2884
+764:2:2892
+765:2:2897
+766:2:2901
+767:2:2902
+768:2:2910
+769:2:2911
+770:2:2915
+771:2:2916
+772:2:2910
+773:2:2911
+774:2:2915
+775:2:2916
+776:2:2924
+777:2:2929
+778:2:2936
+779:2:2937
+780:2:2944
+781:2:2949
+782:2:2956
+783:2:2957
+784:2:2956
+785:2:2957
+786:2:2964
+787:2:2977
+788:2:2978
+789:0:3997
+790:2:2575
+791:0:3997
+792:2:3091
+793:2:3092
+794:2:3096
+795:2:3097
+796:2:3105
+797:2:3106
+798:2:3110
+799:2:3111
+800:2:3119
+801:2:3124
+802:2:3128
+803:2:3129
+804:2:3137
+805:2:3138
+806:2:3142
+807:2:3143
+808:2:3137
+809:2:3138
+810:2:3142
+811:2:3143
+812:2:3151
+813:2:3156
+814:2:3163
+815:2:3164
+816:2:3171
+817:2:3176
+818:2:3183
+819:2:3184
+820:2:3183
+821:2:3184
+822:2:3191
+823:0:3997
+824:2:2575
+825:0:3997
+826:2:3202
+827:2:3203
+828:2:3207
+829:2:3208
+830:2:3216
+831:2:3217
+832:2:3221
+833:2:3222
+834:2:3230
+835:2:3235
+836:2:3239
+837:2:3240
+838:2:3248
+839:2:3249
+840:2:3253
+841:2:3254
+842:2:3248
+843:2:3249
+844:2:3253
+845:2:3254
+846:2:3262
+847:2:3267
+848:2:3274
+849:2:3275
+850:2:3282
+851:2:3287
+852:2:3294
+853:2:3295
+854:2:3294
+855:2:3295
+856:2:3302
+857:2:3311
+858:0:3997
+859:2:2575
+860:0:3997
+861:2:3315
+862:2:3316
+863:2:3317
+864:2:3329
+865:2:3330
+866:2:3334
+867:2:3335
+868:2:3343
+869:2:3348
+870:2:3352
+871:2:3353
+872:2:3361
+873:2:3362
+874:2:3366
+875:2:3367
+876:2:3361
+877:2:3362
+878:2:3366
+879:2:3367
+880:2:3375
+881:2:3380
+882:2:3387
+883:2:3388
+884:2:3395
+885:2:3400
+886:2:3407
+887:2:3408
+888:2:3407
+889:2:3408
+890:2:3415
+891:2:3427
+892:2:3428
+893:0:3997
+894:2:2575
+895:0:3997
+896:2:3541
+897:0:3997
+898:2:3770
+899:2:3771
+900:2:3775
+901:2:3779
+902:2:3780
+903:2:3784
+904:2:3792
+905:2:3793
+906:2:3797
+907:2:3801
+908:2:3802
+909:2:3797
+910:2:3801
+911:2:3802
+912:2:3806
+913:2:3813
+914:2:3820
+915:2:3821
+916:2:3828
+917:2:3833
+918:2:3840
+919:2:3841
+920:2:3840
+921:2:3841
+922:2:3848
+923:2:3852
+924:0:3997
+925:2:3857
+926:0:3997
+927:2:3858
+928:0:3997
+929:2:3859
+930:0:3997
+931:2:3860
+932:0:3997
+933:1:53
+934:0:3997
+935:2:3861
+936:0:3997
+937:1:57
+938:1:58
+939:1:62
+940:1:66
+941:1:67
+942:1:71
+943:1:79
+944:1:80
+945:1:84
+946:1:88
+947:1:89
+948:1:84
+949:1:88
+950:1:89
+951:1:93
+952:1:100
+953:1:107
+954:1:108
+955:1:115
+956:1:120
+957:1:127
+958:1:128
+959:1:127
+960:1:128
+961:1:135
+962:1:139
+963:0:3997
+964:2:3860
+965:0:3997
+966:1:144
+967:0:3997
+968:2:3861
+969:0:3997
+970:2:3862
+971:0:3997
+972:2:3867
+973:0:3997
+974:2:3868
+975:0:3997
+976:2:3876
+977:2:3877
+978:2:3881
+979:2:3885
+980:2:3886
+981:2:3890
+982:2:3898
+983:2:3899
+984:2:3903
+985:2:3907
+986:2:3908
+987:2:3903
+988:2:3907
+989:2:3908
+990:2:3912
+991:2:3919
+992:2:3926
+993:2:3927
+994:2:3934
+995:2:3939
+996:2:3946
+997:2:3947
+998:2:3946
+999:2:3947
+1000:2:3954
+1001:2:3958
+1002:0:3997
+1003:2:3543
+1004:2:3553
+1005:0:3997
+1006:2:2575
+1007:0:3997
+1008:2:3544
+1009:2:3545
+1010:0:3997
+1011:2:2575
+1012:0:3997
+1013:2:3549
+1014:0:3997
+1015:2:3557
+1016:0:3997
+1017:2:2572
+1018:0:3997
+1019:2:2573
+1020:0:3997
+1021:2:2574
+1022:0:3997
+1023:2:2575
+1024:0:3997
+1025:2:2576
+1026:2:2577
+1027:2:2581
+1028:2:2582
+1029:2:2590
+1030:2:2591
+1031:2:2595
+1032:2:2596
+1033:2:2604
+1034:2:2609
+1035:2:2613
+1036:2:2614
+1037:2:2622
+1038:2:2623
+1039:2:2627
+1040:2:2628
+1041:2:2622
+1042:2:2623
+1043:2:2624
+1044:2:2636
+1045:2:2641
+1046:2:2648
+1047:2:2649
+1048:2:2656
+1049:2:2661
+1050:2:2668
+1051:2:2669
+1052:2:2668
+1053:2:2669
+1054:2:2676
+1055:2:2686
+1056:0:3997
+1057:2:2575
+1058:0:3997
+1059:2:2690
+1060:2:2694
+1061:2:2695
+1062:2:2699
+1063:2:2703
+1064:2:2704
+1065:2:2708
+1066:2:2716
+1067:2:2717
+1068:2:2721
+1069:2:2725
+1070:2:2726
+1071:2:2721
+1072:2:2722
+1073:2:2730
+1074:0:3997
+1075:2:2575
+1076:0:3997
+1077:2:2738
+1078:2:2739
+1079:2:2740
+1080:0:3997
+1081:2:2575
+1082:0:3997
+1083:2:2745
+1084:0:3997
+1085:2:3572
+1086:2:3573
+1087:2:3577
+1088:2:3581
+1089:2:3582
+1090:2:3586
+1091:2:3591
+1092:2:3599
+1093:2:3603
+1094:2:3604
+1095:2:3599
+1096:2:3603
+1097:2:3604
+1098:2:3608
+1099:2:3615
+1100:2:3622
+1101:2:3623
+1102:2:3630
+1103:2:3635
+1104:2:3642
+1105:2:3643
+1106:2:3642
+1107:2:3643
+1108:2:3650
+1109:2:3654
+1110:0:3997
+1111:2:3659
+1112:0:3997
+1113:2:3660
+1114:0:3997
+1115:2:3661
+1116:0:3997
+1117:2:3662
+1118:0:3997
+1119:1:53
+1120:0:3997
+1121:2:3663
+1122:0:3997
+1123:1:57
+1124:1:58
+1125:1:62
+1126:1:66
+1127:1:67
+1128:1:71
+1129:1:79
+1130:1:80
+1131:1:84
+1132:1:88
+1133:1:89
+1134:1:84
+1135:1:88
+1136:1:89
+1137:1:93
+1138:1:100
+1139:1:107
+1140:1:108
+1141:1:115
+1142:1:120
+1143:1:127
+1144:1:128
+1145:1:127
+1146:1:128
+1147:1:135
+1148:1:139
+1149:0:3997
+1150:2:3662
+1151:0:3997
+1152:1:144
+1153:0:3997
+1154:2:3663
+1155:0:3997
+1156:2:3664
+1157:0:3997
+1158:2:3669
+1159:0:3997
+1160:2:3670
+1161:0:3997
+1162:2:3678
+1163:2:3679
+1164:2:3683
+1165:2:3687
+1166:2:3688
+1167:2:3692
+1168:2:3700
+1169:2:3701
+1170:2:3705
+1171:2:3709
+1172:2:3710
+1173:2:3705
+1174:2:3709
+1175:2:3710
+1176:2:3714
+1177:2:3721
+1178:2:3728
+1179:2:3729
+1180:2:3736
+1181:2:3741
+1182:2:3748
+1183:2:3749
+1184:2:3748
+1185:2:3749
+1186:2:3756
+1187:2:3760
+1188:0:3997
+1189:2:2747
+1190:2:3553
+1191:0:3997
+1192:2:2575
+1193:0:3997
+1194:2:2748
+1195:0:3997
+1196:2:2575
+1197:0:3997
+1198:2:2751
+1199:2:2752
+1200:2:2756
+1201:2:2757
+1202:2:2765
+1203:2:2766
+1204:2:2770
+1205:2:2771
+1206:2:2779
+1207:2:2784
+1208:2:2788
+1209:2:2789
+1210:2:2797
+1211:2:2798
+1212:2:2802
+1213:2:2803
+1214:2:2797
+1215:2:2798
+1216:2:2802
+1217:2:2803
+1218:2:2811
+1219:2:2816
+1220:2:2823
+1221:2:2824
+1222:2:2831
+1223:2:2836
+1224:2:2843
+1225:2:2844
+1226:2:2843
+1227:2:2844
+1228:2:2851
+1229:2:2860
+1230:0:3997
+1231:2:2575
+1232:0:3997
+1233:2:2864
+1234:2:2865
+1235:2:2866
+1236:2:2878
+1237:2:2879
+1238:2:2883
+1239:2:2884
+1240:2:2892
+1241:2:2897
+1242:2:2901
+1243:2:2902
+1244:2:2910
+1245:2:2911
+1246:2:2915
+1247:2:2916
+1248:2:2910
+1249:2:2911
+1250:2:2915
+1251:2:2916
+1252:2:2924
+1253:2:2929
+1254:2:2936
+1255:2:2937
+1256:2:2944
+1257:2:2949
+1258:2:2956
+1259:2:2957
+1260:2:2956
+1261:2:2957
+1262:2:2964
+1263:2:2977
+1264:2:2978
+1265:0:3997
+1266:2:2575
+1267:0:3997
+1268:2:3091
+1269:2:3092
+1270:2:3096
+1271:2:3097
+1272:2:3105
+1273:2:3106
+1274:2:3110
+1275:2:3111
+1276:2:3119
+1277:2:3124
+1278:2:3128
+1279:2:3129
+1280:2:3137
+1281:2:3138
+1282:2:3142
+1283:2:3143
+1284:2:3137
+1285:2:3138
+1286:2:3142
+1287:2:3143
+1288:2:3151
+1289:2:3156
+1290:2:3163
+1291:2:3164
+1292:2:3171
+1293:2:3176
+1294:2:3183
+1295:2:3184
+1296:2:3183
+1297:2:3184
+1298:2:3191
+1299:0:3997
+1300:2:2575
+1301:0:3997
+1302:2:3202
+1303:2:3203
+1304:2:3207
+1305:2:3208
+1306:2:3216
+1307:2:3217
+1308:2:3221
+1309:2:3222
+1310:2:3230
+1311:2:3235
+1312:2:3239
+1313:2:3240
+1314:2:3248
+1315:2:3249
+1316:2:3253
+1317:2:3254
+1318:2:3248
+1319:2:3249
+1320:2:3253
+1321:2:3254
+1322:2:3262
+1323:2:3267
+1324:2:3274
+1325:2:3275
+1326:2:3282
+1327:2:3287
+1328:2:3294
+1329:2:3295
+1330:2:3294
+1331:2:3295
+1332:2:3302
+1333:2:3311
+1334:0:3997
+1335:2:2575
+1336:0:3997
+1337:1:145
+1338:0:3997
+1339:1:147
+1340:0:3997
+1341:1:46
+1342:0:3997
+1343:1:153
+1344:1:154
+1345:1:158
+1346:1:159
+1347:1:167
+1348:1:168
+1349:1:172
+1350:1:173
+1351:1:181
+1352:1:186
+1353:1:190
+1354:1:191
+1355:1:199
+1356:1:200
+1357:1:204
+1358:1:205
+1359:1:199
+1360:1:200
+1361:1:204
+1362:1:205
+1363:1:213
+1364:1:218
+1365:1:225
+1366:1:226
+1367:1:233
+1368:1:238
+1369:1:245
+1370:1:246
+1371:1:245
+1372:1:246
+1373:1:253
+1374:0:3997
+1375:1:42
+1376:0:3997
+1377:1:43
+1378:0:3997
+1379:1:44
+1380:0:3997
+1381:1:145
+1382:0:3997
+1383:1:147
+1384:0:3997
+1385:1:46
+1386:0:3997
+1387:1:264
+1388:1:265
+1389:0:3997
+1390:1:42
+1391:0:3997
+1392:1:43
+1393:0:3997
+1394:1:44
+1395:0:3997
+1396:1:145
+1397:0:3997
+1398:1:147
+1399:0:3997
+1400:1:46
+1401:0:3997
+1402:1:271
+1403:1:272
+1404:1:276
+1405:1:277
+1406:1:285
+1407:1:286
+1408:1:290
+1409:1:291
+1410:1:299
+1411:1:304
+1412:1:308
+1413:1:309
+1414:1:317
+1415:1:318
+1416:1:322
+1417:1:323
+1418:1:317
+1419:1:318
+1420:1:322
+1421:1:323
+1422:1:331
+1423:1:336
+1424:1:343
+1425:1:344
+1426:1:351
+1427:1:356
+1428:1:363
+1429:1:364
+1430:1:363
+1431:1:364
+1432:1:371
+1433:0:3997
+1434:1:42
+1435:0:3997
+1436:1:43
+1437:0:3997
+1438:1:44
+1439:0:3997
+1440:1:145
+1441:0:3997
+1442:1:147
+1443:0:3997
+1444:1:46
+1445:0:3997
+1446:1:382
+1447:1:383
+1448:1:387
+1449:1:388
+1450:1:396
+1451:1:397
+1452:1:401
+1453:1:402
+1454:1:410
+1455:1:415
+1456:1:419
+1457:1:420
+1458:1:428
+1459:1:429
+1460:1:433
+1461:1:434
+1462:1:428
+1463:1:429
+1464:1:433
+1465:1:434
+1466:1:442
+1467:1:447
+1468:1:454
+1469:1:455
+1470:1:462
+1471:1:467
+1472:1:474
+1473:1:475
+1474:1:474
+1475:1:475
+1476:1:482
+1477:1:491
+1478:0:3997
+1479:1:42
+1480:0:3997
+1481:1:43
+1482:0:3997
+1483:1:44
+1484:0:3997
+1485:1:145
+1486:0:3997
+1487:1:147
+1488:0:3995
+1489:1:46
+1490:0:4001
+1491:1:1067
+1492:1:1068
+1493:1:1072
+1494:1:1073
+1495:1:1081
+1496:1:1082
+1497:1:1083
+1498:1:1095
+1499:1:1100
+1500:1:1104
+1501:1:1105
+1502:1:1113
+1503:1:1114
+1504:1:1118
+1505:1:1119
+1506:1:1113
+1507:1:1114
+1508:1:1118
+1509:1:1119
+1510:1:1127
+1511:1:1132
+1512:1:1139
+1513:1:1140
+1514:1:1147
+1515:1:1152
+1516:1:1159
+1517:1:1160
+1518:1:1159
+1519:1:1160
+1520:1:1167
+1521:0:4001
+1522:1:42
+1523:0:4001
+1524:1:43
+1525:0:4001
+1526:2:3315
+1527:2:3316
+1528:2:3317
+1529:2:3329
+1530:2:3330
+1531:2:3334
+1532:2:3335
+1533:2:3343
+1534:2:3348
+1535:2:3352
+1536:2:3353
+1537:2:3361
+1538:2:3362
+1539:2:3366
+1540:2:3367
+1541:2:3361
+1542:2:3362
+1543:2:3366
+1544:2:3367
+1545:2:3375
+1546:2:3380
+1547:2:3387
+1548:2:3388
+1549:2:3395
+1550:2:3400
+1551:2:3407
+1552:2:3408
+1553:2:3407
+1554:2:3408
+1555:2:3415
+1556:2:3425
+1557:0:4001
+1558:2:2575
+-1:-1:-1
+1559:0:4001
+1560:2:3431
+1561:2:3432
+1562:2:3436
+1563:2:3437
+1564:2:3445
+1565:2:3446
+1566:2:3450
+1567:2:3451
+1568:2:3459
+1569:2:3464
+1570:2:3468
+1571:2:3469
+1572:2:3477
+1573:2:3478
+1574:2:3482
+1575:2:3483
+1576:2:3477
+1577:2:3478
+1578:2:3482
+1579:2:3483
+1580:2:3491
+1581:2:3496
+1582:2:3503
+1583:2:3504
+1584:2:3511
+1585:2:3516
+1586:2:3523
+1587:2:3524
+1588:2:3523
+1589:2:3524
+1590:2:3531
+1591:0:4001
+1592:2:2575
+1593:0:4001
+1594:2:3315
+1595:2:3316
+1596:2:3320
+1597:2:3321
+1598:2:3329
+1599:2:3330
+1600:2:3334
+1601:2:3335
+1602:2:3343
+1603:2:3348
+1604:2:3352
+1605:2:3353
+1606:2:3361
+1607:2:3362
+1608:2:3366
+1609:2:3367
+1610:2:3361
+1611:2:3362
+1612:2:3366
+1613:2:3367
+1614:2:3375
+1615:2:3380
+1616:2:3387
+1617:2:3388
+1618:2:3395
+1619:2:3400
+1620:2:3407
+1621:2:3408
+1622:2:3407
+1623:2:3408
+1624:2:3415
+1625:2:3425
+1626:0:4001
+1627:2:2575
+1628:0:4001
+1629:2:3431
+1630:2:3432
+1631:2:3436
+1632:2:3437
+1633:2:3445
+1634:2:3446
+1635:2:3450
+1636:2:3451
+1637:2:3459
+1638:2:3464
+1639:2:3468
+1640:2:3469
+1641:2:3477
+1642:2:3478
+1643:2:3482
+1644:2:3483
+1645:2:3477
+1646:2:3478
+1647:2:3482
+1648:2:3483
+1649:2:3491
+1650:2:3496
+1651:2:3503
+1652:2:3504
+1653:2:3511
+1654:2:3516
+1655:2:3523
+1656:2:3524
+1657:2:3523
+1658:2:3524
+1659:2:3531
+1660:0:4001
+1661:2:2575
+1662:0:4001
+1663:2:3315
+1664:2:3316
+1665:2:3320
+1666:2:3321
+1667:2:3329
+1668:2:3330
+1669:2:3334
+1670:2:3335
+1671:2:3343
+1672:2:3348
+1673:2:3352
+1674:2:3353
+1675:2:3361
+1676:2:3362
+1677:2:3366
+1678:2:3367
+1679:2:3361
+1680:2:3362
+1681:2:3366
+1682:2:3367
+1683:2:3375
+1684:2:3380
+1685:2:3387
+1686:2:3388
+1687:2:3395
+1688:2:3400
+1689:2:3407
+1690:2:3408
+1691:2:3407
+1692:2:3408
+1693:2:3415
+1694:2:3425
+1695:0:4001
+1696:2:2575
diff --git a/formal-model/urcu-controldataflow-intel-ipi/.input.define b/formal-model/urcu-controldataflow-intel-ipi/.input.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi/.input.spin b/formal-model/urcu-controldataflow-intel-ipi/.input.spin
new file mode 100644 (file)
index 0000000..10c14d2
--- /dev/null
@@ -0,0 +1,1273 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
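+
+/*
+ * Editor's note: a minimal, hypothetical illustration of the token scheme
+ * above (kept under #if 0, so it is not part of the verified model).
+ * A token word "flow" gates two statements; the second statement has a
+ * RAW dependency on the first, so it consumes the first one's token.
+ */
+#if 0
+#define STMT_A_OUT	(1 << 0)
+#define STMT_B_OUT	(1 << 1)
+
+int flow;
+
+active proctype token_example()
+{
+	do
+	:: CONSUME_TOKENS(flow, 0, STMT_A_OUT) ->
+		/* statement A would execute here */
+		PRODUCE_TOKENS(flow, STMT_A_OUT);
+	:: CONSUME_TOKENS(flow, STMT_A_OUT, STMT_B_OUT) ->
+		/* statement B would execute here, only once A is done (RAW) */
+		PRODUCE_TOKENS(flow, STMT_B_OUT);
+	:: CONSUME_TOKENS(flow, STMT_A_OUT | STMT_B_OUT, 0) ->
+		CLEAR_TOKENS(flow, STMT_A_OUT | STMT_B_OUT);
+		break;
+	od;
+}
+#endif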
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can normally remove this dependency, but it may be unavoidable when writing
+ * multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
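+
+/*
+ * Editor's sketch of the three data dependency kinds, on a hypothetical
+ * variable a (b receives the read value); for illustration only:
+ *
+ *	a = 1;		(write)
+ *	b = a;		RAW : reads the value written just above
+ *	a = 2;		WAR : must not overtake the read of a
+ *	a = 3;		WAW : ordered against the previous write of a
+ */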
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not update memory (and thus other caches) when the cache is dirty.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
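+
+/*
+ * Editor's note: minimal usage sketch of the cache macros above, on a
+ * hypothetical variable "foo" (kept under #if 0, not part of the model).
+ * A cached write stays local and marks the line dirty until it is
+ * explicitly flushed to memory.
+ */
+#if 0
+DECLARE_CACHED_VAR(byte, foo);
+
+active proctype cache_example()
+{
+	assert(get_pid() < NR_PROCS);
+	WRITE_CACHED_VAR(foo, 1);		/* local cache only; line now dirty */
+	assert(IS_CACHE_DIRTY(foo, get_pid()));
+	CACHE_WRITE_TO_MEM(foo, get_pid());	/* propagate to mem_foo */
+	assert(mem_foo == 1 && !IS_CACHE_DIRTY(foo, get_pid()));
+}
+#endif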
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and the
+                * reader keeps servicing them without continuing its execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution onto the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is actually blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
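+
+/*
+ * Informal overview (a sketch derived from the token definitions above and
+ * the writer body below) : the intended dependency order of the writer steps
+ * is roughly
+ *   WRITE_DATA -> WRITE_PROC_WMB -> WRITE_XCHG_PTR -> WRITE_PROC_FIRST_MB
+ *   -> first flip (READ_GP, WRITE_GP, WAIT) -> second flip
+ *   -> WRITE_PROC_SECOND_MB -> WRITE_FREE.
+ * WRITE_PROC_ALL_TOKENS_CLEAR covers bits 0 to 14, so the WAIT_LOOP branch
+ * tokens are also cleared at the end of each loop iteration.
+ */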
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we do not add nonexistent dependencies on the
+                                * global GP update. Needed to test the single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read performed as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/DEFINES b/formal-model/urcu-controldataflow-intel-ipi/DEFINES
new file mode 100644 (file)
index 0000000..abea5ff
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-intel-ipi/Makefile b/formal-model/urcu-controldataflow-intel-ipi/Makefile
new file mode 100644 (file)
index 0000000..de47dff
--- /dev/null
@@ -0,0 +1,170 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       make urcu_free | tee urcu_free.log
+       make urcu_free_no_mb | tee urcu_free_no_mb.log
+       make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       make asserts | tee asserts.log
+       make summary
+
+#show trail : spin -v -t -N pan.ltl input.spin
+# after each individual make.
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
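+
+# Informal summary of the pipeline above : each verification target
+# concatenates .input.define, DEFINES and ${SPINFILE} into .input.spin,
+# lets spin generate pan.c from it (with the pan.ltl never claim for LTL
+# runs), builds the pan verifier with gcc and runs it; the .input.spin used
+# and any counter-example trail are then saved as <target>.spin.input and
+# <target>.spin.input.trail.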
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-intel-ipi/references.txt b/formal-model/urcu-controldataflow-intel-ipi/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu.sh b/formal-model/urcu-controldataflow-intel-ipi/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w21
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu.spin b/formal-model/urcu-controldataflow-intel-ipi/urcu.spin
new file mode 100644 (file)
index 0000000..54752a1
--- /dev/null
@@ -0,0 +1,1254 @@
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
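+
+/*
+ * Informal usage sketch (the reader and writer bodies below follow this
+ * pattern) : each modeled instruction is a guarded alternative which consumes
+ * the tokens of the instructions it depends on, refuses to run if its own
+ * token is already present, and produces its own token once executed :
+ *
+ *   :: CONSUME_TOKENS(proc_state, DEPENDENCY_TOKENS, OWN_TOKEN) ->
+ *           ooo_mem(i);
+ *           ... effect of the modeled instruction ...
+ *           PRODUCE_TOKENS(proc_state, OWN_TOKEN);
+ *
+ * (proc_state, DEPENDENCY_TOKENS and OWN_TOKEN are placeholder names.)
+ * Passing the produced token as "notbits" prevents the instruction from
+ * executing twice within one iteration; CLEAR_TOKENS resets the state
+ * between iterations.
+ */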
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but the ordering can be required when
+ * writing multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can still be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
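+
+/*
+ * Small illustrative example (not part of the model, hypothetical variables
+ * a, b, c) of the three data dependency types above :
+ *
+ *   a = b + 1;   // statement 1
+ *   c = a * 2;   // statement 2 : RAW on 'a' (reads what 1 wrote)
+ *   b = 7;       // statement 3 : WAR on 'b' (writes what 1 read)
+ *   b = 9;       // statement 4 : WAW on 'b' (writes what 3 wrote)
+ */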
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between iterations. To see the
+ * effect of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core-synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered (c.f.
+ * http://www.linuxjournal.com/article/8212).
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Non-deterministically propagate the cache entry to memory (if dirty), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
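+
+/*
+ * Informal sketch of the cache model above : WRITE_CACHED_VAR only updates
+ * the writer's own cache entry and marks it dirty. The value becomes visible
+ * to another process once CACHE_WRITE_TO_MEM (from smp_wmb, or randomly from
+ * ooo_mem) flushes it to the mem_ copy, and once the other process refreshes
+ * its own (non-dirty) entry through CACHE_READ_FROM_MEM (from smp_rmb, or
+ * randomly from ooo_mem on architectures with out-of-order cache reads).
+ * This is what lets the model exercise arbitrary reordering between barriers.
+ */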
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping,
+                * waiting for the reader and sending barrier requests, while the
+                * reader keeps servicing them without continuing its execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
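+
+/*
+ * Informal summary of the two configurations above : with REMOTE_BARRIERS
+ * (the IPI/signal-based scheme), the reader's own barriers are no-ops and the
+ * writer's smp_mb_send() posts a barrier request to each reader and
+ * busy-waits until smp_mb_recv() services it. Without REMOTE_BARRIERS, both
+ * sides simply execute a local smp_mb().
+ */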
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
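+
+/*
+ * Informal usage note : PROCEDURE_READ_LOCK expands to a set of "::" guarded
+ * alternatives meant to be placed directly inside the reader's "if ... fi"
+ * statement below. The "base" argument shifts the READ_PROD_* bits so that
+ * the nested and unrolled instantiations use disjoint token bits (see
+ * READ_LOCK_BASE, READ_LOCK_NESTED_BASE and READ_LOCK_UNROLL_BASE below).
+ */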
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read-unlock
+                        * model, given it is not usually needed. The implementation keeps
+                        * the barrier because the cost of the branch needed to skip it in
+                        * the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop: second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s ARE IN PLACE_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution into the next.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. Reader and writer progress must be tested
+                * separately; otherwise we could believe the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader. (A stand-alone sketch of how such progress labels
+                * are checked follows this proctype.)
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
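
To make the progress-testing note above concrete, here is a minimal stand-alone Promela sketch (illustrative only, not part of this model; the name progress_work and the bound 10 are made up). Spin treats any label starting with "progress" as a progress state: an infinite execution that never passes through such a label is reported as a non-progress cycle.

    byte counter = 0;

    active proctype worker()
    {
    	do
    	:: counter < 10 ->
    progress_work:
    		counter++
    	:: else ->
    		/* Idle loop without a progress label: once counter
    		 * reaches 10, this is flagged as a non-progress cycle. */
    		skip
    	od
    }

Assuming the usual Spin workflow, this would be checked with something like "spin -a sketch.pml; cc -DNP -o pan pan.c; ./pan -l -f" (-l searches for non-progress cycles, -f adds weak fairness); in the real model, the READER_PROGRESS/WRITER_PROGRESS defines select which proctype carries the progress label.
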
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding of the urcu_writer tokens.
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
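
The writer below drives itself with the same PRODUCE_TOKENS / CONSUME_TOKENS / CLEAR_TOKENS macros as the reader: each token bit records that one "instruction" of the modelled code has executed. As a rough stand-alone illustration of the scheme (assuming the usual bit-mask encoding; this is not the model's actual macro code, and TOK_A/TOK_B are made-up names):

    /* Hypothetical token helpers: tokens are bits in a state word.
     * CONSUME_TOKENS is a pure guard: true when every dependency bit
     * is present and the produced bit has not been set yet.
     */
    #define PRODUCE_TOKENS(state, tokens)	state = state | (tokens)
    #define CLEAR_TOKENS(state, tokens)	state = state & ~(tokens)
    #define CONSUME_TOKENS(state, bits_consumed, bits_produced)	\
    	((!((state) & (bits_produced)))				\
    	 && (((state) & (bits_consumed)) == (bits_consumed)))

    #define TOK_A	(1 << 0)
    #define TOK_B	(1 << 1)

    init {
    	int state = 0;
    	PRODUCE_TOKENS(state, TOK_A);
    	assert(CONSUME_TOKENS(state, TOK_A, TOK_B));	/* B may run now */
    	PRODUCE_TOKENS(state, TOK_B);
    	assert(!CONSUME_TOKENS(state, TOK_A, TOK_B));	/* B already ran */
    	CLEAR_TOKENS(state, TOK_A | TOK_B);
    	assert(state == 0)
    }

Each ":: CONSUME_TOKENS(...) -> ... PRODUCE_TOKENS(...)" option in the writer then reads as "once these earlier instructions have executed and this one has not, it may execute now", which is how the model lets instructions interleave out of program order while still respecting their data and control dependencies.
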
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local copy of the current parity so
+                                * we do not add non-existing dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity (see the stand-alone sketch of this
+                        * wait predicate after this proctype).
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * done as a prefetch. Note that all instructions with
+                        * side-effects depending on WRITE_PROC_SECOND_READ_GP
+                        * should also depend on completion of this busy-waiting
+                        * loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok not
+                * to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with progress here so that, under weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
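
The two wait steps in the writer decide whether reader 0 still holds up the grace period with the predicate "(tmp2 & RCU_GP_CTR_NEST_MASK) && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT)": the writer keeps waiting while the reader has a non-zero nesting count and the parity bit of its counter snapshot still belongs to the phase being drained. A minimal stand-alone sketch of that predicate (constant values are illustrative, the model defines its own earlier; GP_STILL_WAITED_FOR is a made-up helper name, and gp_val = RCU_GP_CTR_BIT mirrors the first wait):

    /* Illustrative values only; the model defines its own constants. */
    #define RCU_GP_CTR_BIT		(1 << 7)
    #define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
    #define GP_STILL_WAITED_FOR(ctr, gp_val)		\
    	(((ctr) & RCU_GP_CTR_NEST_MASK)			\
    	 && (((ctr) ^ (gp_val)) & RCU_GP_CTR_BIT))

    init {
    	/* The writer waits for readers still in the "old" (even) phase. */
    	byte gp_val = RCU_GP_CTR_BIT;

    	assert(!GP_STILL_WAITED_FOR(0, gp_val));	/* reader inactive */
    	assert(GP_STILL_WAITED_FOR(1, gp_val));		/* nested, old phase */
    	assert(!GP_STILL_WAITED_FOR(RCU_GP_CTR_BIT | 1, gp_val))	/* nested, new phase */
    }

Flipping RCU_GP_CTR_BIT in urcu_gp_ctr between the two wait phases is what lets the writer distinguish readers that entered their critical section before a flip from readers that entered after it.
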
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Declared after the readers and writers so the pid count stays correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free.log b/formal-model/urcu-controldataflow-intel-ipi/urcu_free.log
new file mode 100644 (file)
index 0000000..afb21ca
--- /dev/null
@@ -0,0 +1,503 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1294)
+Depth=    9223 States=    1e+06 Transitions= 6.87e+06 Memory=   550.432        t=   16.6 R=   6e+04
+Depth=    9223 States=    2e+06 Transitions= 1.47e+07 Memory=   634.318        t=   36.7 R=   5e+04
+Depth=    9223 States=    3e+06 Transitions= 2.46e+07 Memory=   718.303        t=   62.6 R=   5e+04
+pan: resizing hashtable to -w22..  done
+Depth=    9223 States=    4e+06 Transitions= 3.19e+07 Memory=   833.311        t=   81.2 R=   5e+04
+Depth=    9223 States=    5e+06 Transitions= 3.95e+07 Memory=   917.295        t=    100 R=   5e+04
+Depth=    9223 States=    6e+06 Transitions= 5.71e+07 Memory=  1001.279        t=    149 R=   4e+04
+Depth=    9223 States=    7e+06 Transitions= 6.81e+07 Memory=  1085.264        t=    178 R=   4e+04
+Depth=    9223 States=    8e+06 Transitions= 8.22e+07 Memory=  1169.151        t=    216 R=   4e+04
+Depth=    9223 States=    9e+06 Transitions= 9.54e+07 Memory=  1253.135        t=    252 R=   4e+04
+pan: resizing hashtable to -w24..  done
+Depth=    9223 States=    1e+07 Transitions= 1.08e+08 Memory=  1461.115        t=    288 R=   3e+04
+Depth=    9223 States=  1.1e+07 Transitions= 1.21e+08 Memory=  1545.100        t=    321 R=   3e+04
+Depth=    9223 States=  1.2e+07 Transitions=  1.3e+08 Memory=  1629.084        t=    345 R=   3e+04
+Depth=    9223 States=  1.3e+07 Transitions= 1.42e+08 Memory=  1713.068        t=    378 R=   3e+04
+Depth=    9223 States=  1.4e+07 Transitions= 1.72e+08 Memory=  1797.053        t=    463 R=   3e+04
+Depth=    9223 States=  1.5e+07 Transitions= 1.91e+08 Memory=  1881.037        t=    516 R=   3e+04
+Depth=    9223 States=  1.6e+07 Transitions= 2.08e+08 Memory=  1964.924        t=    562 R=   3e+04
+Depth=    9223 States=  1.7e+07 Transitions=  2.2e+08 Memory=  2048.908        t=    595 R=   3e+04
+Depth=    9223 States=  1.8e+07 Transitions= 2.39e+08 Memory=  2132.893        t=    647 R=   3e+04
+Depth=    9223 States=  1.9e+07 Transitions= 2.55e+08 Memory=  2216.877        t=    691 R=   3e+04
+Depth=    9223 States=    2e+07 Transitions= 2.72e+08 Memory=  2300.861        t=    739 R=   3e+04
+Depth=    9285 States=  2.1e+07 Transitions= 2.85e+08 Memory=  2384.846        t=    774 R=   3e+04
+Depth=    9324 States=  2.2e+07 Transitions= 2.99e+08 Memory=  2468.830        t=    812 R=   3e+04
+Depth=    9324 States=  2.3e+07 Transitions=  3.1e+08 Memory=  2552.717        t=    842 R=   3e+04
+Depth=    9324 States=  2.4e+07 Transitions= 3.21e+08 Memory=  2636.701        t=    873 R=   3e+04
+Depth=    9324 States=  2.5e+07 Transitions= 3.34e+08 Memory=  2720.686        t=    908 R=   3e+04
+Depth=    9324 States=  2.6e+07 Transitions= 3.45e+08 Memory=  2804.670        t=    939 R=   3e+04
+Depth=    9324 States=  2.7e+07 Transitions= 3.59e+08 Memory=  2888.654        t=    975 R=   3e+04
+Depth=    9324 States=  2.8e+07 Transitions= 3.71e+08 Memory=  2972.639        t= 1.01e+03 R=   3e+04
+Depth=    9324 States=  2.9e+07 Transitions= 3.84e+08 Memory=  3056.526        t= 1.04e+03 R=   3e+04
+Depth=    9324 States=    3e+07 Transitions= 3.96e+08 Memory=  3140.510        t= 1.08e+03 R=   3e+04
+Depth=    9324 States=  3.1e+07 Transitions= 4.09e+08 Memory=  3224.494        t= 1.11e+03 R=   3e+04
+Depth=    9324 States=  3.2e+07 Transitions= 4.19e+08 Memory=  3308.479        t= 1.14e+03 R=   3e+04
+Depth=    9324 States=  3.3e+07 Transitions=  4.3e+08 Memory=  3392.463        t= 1.17e+03 R=   3e+04
+Depth=    9324 States=  3.4e+07 Transitions= 4.44e+08 Memory=  3476.447        t= 1.21e+03 R=   3e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9324 States=  3.5e+07 Transitions=  4.6e+08 Memory=  4056.416        t= 1.26e+03 R=   3e+04
+Depth=    9324 States=  3.6e+07 Transitions= 4.73e+08 Memory=  4140.401        t= 1.3e+03 R=   3e+04
+Depth=    9324 States=  3.7e+07 Transitions= 4.89e+08 Memory=  4224.385        t= 1.34e+03 R=   3e+04
+Depth=    9324 States=  3.8e+07 Transitions= 5.04e+08 Memory=  4308.369        t= 1.38e+03 R=   3e+04
+Depth=    9324 States=  3.9e+07 Transitions= 5.18e+08 Memory=  4392.354        t= 1.42e+03 R=   3e+04
+Depth=    9324 States=    4e+07 Transitions= 5.28e+08 Memory=  4476.338        t= 1.44e+03 R=   3e+04
+Depth=    9324 States=  4.1e+07 Transitions= 5.38e+08 Memory=  4560.225        t= 1.47e+03 R=   3e+04
+Depth=    9324 States=  4.2e+07 Transitions=  5.5e+08 Memory=  4644.209        t= 1.5e+03 R=   3e+04
+Depth=    9324 States=  4.3e+07 Transitions= 5.76e+08 Memory=  4728.193        t= 1.58e+03 R=   3e+04
+Depth=    9324 States=  4.4e+07 Transitions= 6.01e+08 Memory=  4812.178        t= 1.65e+03 R=   3e+04
+Depth=    9324 States=  4.5e+07 Transitions= 6.18e+08 Memory=  4896.162        t= 1.69e+03 R=   3e+04
+Depth=    9324 States=  4.6e+07 Transitions= 6.29e+08 Memory=  4980.147        t= 1.72e+03 R=   3e+04
+Depth=    9324 States=  4.7e+07 Transitions= 6.44e+08 Memory=  5064.131        t= 1.76e+03 R=   3e+04
+Depth=    9324 States=  4.8e+07 Transitions= 6.63e+08 Memory=  5148.018        t= 1.82e+03 R=   3e+04
+Depth=    9324 States=  4.9e+07 Transitions=  6.8e+08 Memory=  5232.002        t= 1.86e+03 R=   3e+04
+Depth=    9324 States=    5e+07 Transitions= 6.94e+08 Memory=  5315.986        t= 1.9e+03 R=   3e+04
+Depth=    9324 States=  5.1e+07 Transitions= 7.07e+08 Memory=  5399.971        t= 1.94e+03 R=   3e+04
+Depth=    9324 States=  5.2e+07 Transitions= 7.19e+08 Memory=  5483.955        t= 1.97e+03 R=   3e+04
+Depth=    9324 States=  5.3e+07 Transitions= 7.33e+08 Memory=  5567.940        t= 2.01e+03 R=   3e+04
+Depth=    9324 States=  5.4e+07 Transitions= 7.43e+08 Memory=  5651.826        t= 2.03e+03 R=   3e+04
+Depth=    9324 States=  5.5e+07 Transitions= 7.56e+08 Memory=  5735.811        t= 2.07e+03 R=   3e+04
+Depth=    9324 States=  5.6e+07 Transitions= 7.67e+08 Memory=  5819.795        t= 2.1e+03 R=   3e+04
+Depth=    9324 States=  5.7e+07 Transitions= 7.82e+08 Memory=  5903.779        t= 2.14e+03 R=   3e+04
+Depth=    9324 States=  5.8e+07 Transitions= 7.92e+08 Memory=  5987.764        t= 2.17e+03 R=   3e+04
+Depth=    9324 States=  5.9e+07 Transitions= 8.05e+08 Memory=  6071.748        t= 2.2e+03 R=   3e+04
+Depth=    9324 States=    6e+07 Transitions= 8.17e+08 Memory=  6155.733        t= 2.23e+03 R=   3e+04
+Depth=    9324 States=  6.1e+07 Transitions= 8.27e+08 Memory=  6239.619        t= 2.26e+03 R=   3e+04
+Depth=    9324 States=  6.2e+07 Transitions= 8.39e+08 Memory=  6323.604        t= 2.29e+03 R=   3e+04
+Depth=    9324 States=  6.3e+07 Transitions= 8.57e+08 Memory=  6407.588        t= 2.34e+03 R=   3e+04
+Depth=    9324 States=  6.4e+07 Transitions= 8.68e+08 Memory=  6491.572        t= 2.37e+03 R=   3e+04
+Depth=    9324 States=  6.5e+07 Transitions= 8.83e+08 Memory=  6575.557        t= 2.41e+03 R=   3e+04
+Depth=    9324 States=  6.6e+07 Transitions= 8.98e+08 Memory=  6659.541        t= 2.45e+03 R=   3e+04
+Depth=    9324 States=  6.7e+07 Transitions= 9.13e+08 Memory=  6743.428        t= 2.49e+03 R=   3e+04
+Depth=    9324 States=  6.8e+07 Transitions= 9.28e+08 Memory=  6827.412        t= 2.53e+03 R=   3e+04
+Depth=    9324 States=  6.9e+07 Transitions= 9.42e+08 Memory=  6911.397        t= 2.57e+03 R=   3e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 9324, errors: 0
+ 69786664 states, stored
+8.8218731e+08 states, matched
+9.5197398e+08 transitions (= stored+matched)
+1.4613809e+10 atomic steps
+hash conflicts: 5.3037519e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 7720.235      equivalent memory usage for states (stored*(State-vector + overhead))
+ 6011.129      actual memory usage for states (compression: 77.86%)
+               state-vector as stored = 62 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+    3.383      memory lost to fragmentation
+ 6977.510      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 271, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 77, "(1)"
+       line 252, "pan.___", state 85, "(1)"
+       line 256, "pan.___", state 97, "(1)"
+       line 260, "pan.___", state 105, "(1)"
+       line 410, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 195, "(1)"
+       line 256, "pan.___", state 215, "(1)"
+       line 260, "pan.___", state 223, "(1)"
+       line 690, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 410, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 313, "(1)"
+       line 256, "pan.___", state 333, "(1)"
+       line 260, "pan.___", state 341, "(1)"
+       line 410, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 424, "(1)"
+       line 256, "pan.___", state 444, "(1)"
+       line 260, "pan.___", state 452, "(1)"
+       line 410, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 475, "(1)"
+       line 410, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 476, "else"
+       line 410, "pan.___", state 479, "(1)"
+       line 414, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 489, "(1)"
+       line 414, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 490, "else"
+       line 414, "pan.___", state 493, "(1)"
+       line 414, "pan.___", state 494, "(1)"
+       line 414, "pan.___", state 494, "(1)"
+       line 412, "pan.___", state 499, "((i<1))"
+       line 412, "pan.___", state 499, "((i>=1))"
+       line 419, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 507, "(1)"
+       line 419, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 508, "else"
+       line 419, "pan.___", state 511, "(1)"
+       line 419, "pan.___", state 512, "(1)"
+       line 419, "pan.___", state 512, "(1)"
+       line 423, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 521, "(1)"
+       line 423, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 522, "else"
+       line 423, "pan.___", state 525, "(1)"
+       line 423, "pan.___", state 526, "(1)"
+       line 423, "pan.___", state 526, "(1)"
+       line 421, "pan.___", state 531, "((i<2))"
+       line 421, "pan.___", state 531, "((i>=2))"
+       line 248, "pan.___", state 537, "(1)"
+       line 252, "pan.___", state 545, "(1)"
+       line 252, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 546, "else"
+       line 250, "pan.___", state 551, "((i<1))"
+       line 250, "pan.___", state 551, "((i>=1))"
+       line 256, "pan.___", state 557, "(1)"
+       line 256, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 558, "else"
+       line 260, "pan.___", state 565, "(1)"
+       line 260, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, "pan.___", state 566, "else"
+       line 258, "pan.___", state 571, "((i<2))"
+       line 258, "pan.___", state 571, "((i>=2))"
+       line 265, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, "pan.___", state 575, "else"
+       line 430, "pan.___", state 577, "(1)"
+       line 430, "pan.___", state 577, "(1)"
+       line 690, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 690, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 690, "pan.___", state 582, "(1)"
+       line 410, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 653, "(1)"
+       line 256, "pan.___", state 673, "(1)"
+       line 260, "pan.___", state 681, "(1)"
+       line 410, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 771, "(1)"
+       line 256, "pan.___", state 791, "(1)"
+       line 260, "pan.___", state 799, "(1)"
+       line 410, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 820, "(1)"
+       line 410, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 821, "else"
+       line 410, "pan.___", state 824, "(1)"
+       line 414, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 834, "(1)"
+       line 414, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 835, "else"
+       line 414, "pan.___", state 838, "(1)"
+       line 414, "pan.___", state 839, "(1)"
+       line 414, "pan.___", state 839, "(1)"
+       line 412, "pan.___", state 844, "((i<1))"
+       line 412, "pan.___", state 844, "((i>=1))"
+       line 419, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 852, "(1)"
+       line 419, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 853, "else"
+       line 419, "pan.___", state 856, "(1)"
+       line 419, "pan.___", state 857, "(1)"
+       line 419, "pan.___", state 857, "(1)"
+       line 423, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 866, "(1)"
+       line 423, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 867, "else"
+       line 423, "pan.___", state 870, "(1)"
+       line 423, "pan.___", state 871, "(1)"
+       line 423, "pan.___", state 871, "(1)"
+       line 421, "pan.___", state 876, "((i<2))"
+       line 421, "pan.___", state 876, "((i>=2))"
+       line 248, "pan.___", state 882, "(1)"
+       line 252, "pan.___", state 890, "(1)"
+       line 252, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 891, "else"
+       line 250, "pan.___", state 896, "((i<1))"
+       line 250, "pan.___", state 896, "((i>=1))"
+       line 256, "pan.___", state 902, "(1)"
+       line 256, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 903, "else"
+       line 260, "pan.___", state 910, "(1)"
+       line 260, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, "pan.___", state 911, "else"
+       line 258, "pan.___", state 916, "((i<2))"
+       line 258, "pan.___", state 916, "((i>=2))"
+       line 265, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, "pan.___", state 920, "else"
+       line 430, "pan.___", state 922, "(1)"
+       line 430, "pan.___", state 922, "(1)"
+       line 698, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 410, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 995, "(1)"
+       line 256, "pan.___", state 1015, "(1)"
+       line 260, "pan.___", state 1023, "(1)"
+       line 410, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1109, "(1)"
+       line 256, "pan.___", state 1129, "(1)"
+       line 260, "pan.___", state 1137, "(1)"
+       line 410, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1224, "(1)"
+       line 256, "pan.___", state 1244, "(1)"
+       line 260, "pan.___", state 1252, "(1)"
+       line 410, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1335, "(1)"
+       line 256, "pan.___", state 1355, "(1)"
+       line 260, "pan.___", state 1363, "(1)"
+       line 410, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1451, "(1)"
+       line 256, "pan.___", state 1471, "(1)"
+       line 260, "pan.___", state 1479, "(1)"
+       line 410, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1562, "(1)"
+       line 256, "pan.___", state 1582, "(1)"
+       line 260, "pan.___", state 1590, "(1)"
+       line 410, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1676, "(1)"
+       line 256, "pan.___", state 1696, "(1)"
+       line 260, "pan.___", state 1704, "(1)"
+       line 737, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 410, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1794, "(1)"
+       line 256, "pan.___", state 1814, "(1)"
+       line 260, "pan.___", state 1822, "(1)"
+       line 410, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1905, "(1)"
+       line 256, "pan.___", state 1925, "(1)"
+       line 260, "pan.___", state 1933, "(1)"
+       line 410, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1956, "(1)"
+       line 410, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1957, "else"
+       line 410, "pan.___", state 1960, "(1)"
+       line 414, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1970, "(1)"
+       line 414, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 1971, "else"
+       line 414, "pan.___", state 1974, "(1)"
+       line 414, "pan.___", state 1975, "(1)"
+       line 414, "pan.___", state 1975, "(1)"
+       line 412, "pan.___", state 1980, "((i<1))"
+       line 412, "pan.___", state 1980, "((i>=1))"
+       line 419, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1988, "(1)"
+       line 419, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 1989, "else"
+       line 419, "pan.___", state 1992, "(1)"
+       line 419, "pan.___", state 1993, "(1)"
+       line 419, "pan.___", state 1993, "(1)"
+       line 423, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2002, "(1)"
+       line 423, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 2003, "else"
+       line 423, "pan.___", state 2006, "(1)"
+       line 423, "pan.___", state 2007, "(1)"
+       line 423, "pan.___", state 2007, "(1)"
+       line 421, "pan.___", state 2012, "((i<2))"
+       line 421, "pan.___", state 2012, "((i>=2))"
+       line 248, "pan.___", state 2018, "(1)"
+       line 252, "pan.___", state 2026, "(1)"
+       line 252, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 2027, "else"
+       line 250, "pan.___", state 2032, "((i<1))"
+       line 250, "pan.___", state 2032, "((i>=1))"
+       line 256, "pan.___", state 2038, "(1)"
+       line 256, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2039, "else"
+       line 260, "pan.___", state 2046, "(1)"
+       line 260, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, "pan.___", state 2047, "else"
+       line 258, "pan.___", state 2052, "((i<2))"
+       line 258, "pan.___", state 2052, "((i>=2))"
+       line 265, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, "pan.___", state 2056, "else"
+       line 430, "pan.___", state 2058, "(1)"
+       line 430, "pan.___", state 2058, "(1)"
+       line 737, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 737, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 737, "pan.___", state 2063, "(1)"
+       line 410, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2134, "(1)"
+       line 256, "pan.___", state 2154, "(1)"
+       line 260, "pan.___", state 2162, "(1)"
+       line 410, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2251, "(1)"
+       line 256, "pan.___", state 2271, "(1)"
+       line 260, "pan.___", state 2279, "(1)"
+       line 410, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2362, "(1)"
+       line 256, "pan.___", state 2382, "(1)"
+       line 260, "pan.___", state 2390, "(1)"
+       line 248, "pan.___", state 2421, "(1)"
+       line 256, "pan.___", state 2441, "(1)"
+       line 260, "pan.___", state 2449, "(1)"
+       line 248, "pan.___", state 2464, "(1)"
+       line 256, "pan.___", state 2484, "(1)"
+       line 260, "pan.___", state 2492, "(1)"
+       line 897, "pan.___", state 2509, "-end-"
+       (221 of 2509 states)
+unreached in proctype urcu_writer
+       line 410, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 82, "(1)"
+       line 252, "pan.___", state 90, "(1)"
+       line 256, "pan.___", state 102, "(1)"
+       line 271, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 257, "(1)"
+       line 252, "pan.___", state 265, "(1)"
+       line 256, "pan.___", state 277, "(1)"
+       line 260, "pan.___", state 285, "(1)"
+       line 414, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 378, "(1)"
+       line 256, "pan.___", state 390, "(1)"
+       line 260, "pan.___", state 398, "(1)"
+       line 414, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 499, "(1)"
+       line 256, "pan.___", state 511, "(1)"
+       line 260, "pan.___", state 519, "(1)"
+       line 414, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 610, "(1)"
+       line 256, "pan.___", state 622, "(1)"
+       line 260, "pan.___", state 630, "(1)"
+       line 414, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 723, "(1)"
+       line 256, "pan.___", state 735, "(1)"
+       line 260, "pan.___", state 743, "(1)"
+       line 271, "pan.___", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 820, "(1)"
+       line 283, "pan.___", state 827, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 843, "(1)"
+       line 252, "pan.___", state 851, "(1)"
+       line 256, "pan.___", state 863, "(1)"
+       line 260, "pan.___", state 871, "(1)"
+       line 271, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 911, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 924, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 933, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 949, "(1)"
+       line 252, "pan.___", state 957, "(1)"
+       line 256, "pan.___", state 969, "(1)"
+       line 260, "pan.___", state 977, "(1)"
+       line 275, "pan.___", state 1003, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1016, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1025, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1041, "(1)"
+       line 252, "pan.___", state 1049, "(1)"
+       line 256, "pan.___", state 1061, "(1)"
+       line 260, "pan.___", state 1069, "(1)"
+       line 271, "pan.___", state 1100, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1109, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1122, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1131, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1147, "(1)"
+       line 252, "pan.___", state 1155, "(1)"
+       line 256, "pan.___", state 1167, "(1)"
+       line 260, "pan.___", state 1175, "(1)"
+       line 275, "pan.___", state 1201, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1214, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1239, "(1)"
+       line 252, "pan.___", state 1247, "(1)"
+       line 256, "pan.___", state 1259, "(1)"
+       line 260, "pan.___", state 1267, "(1)"
+       line 271, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1307, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1320, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1329, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1345, "(1)"
+       line 252, "pan.___", state 1353, "(1)"
+       line 256, "pan.___", state 1365, "(1)"
+       line 260, "pan.___", state 1373, "(1)"
+       line 275, "pan.___", state 1399, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1412, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1421, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1437, "(1)"
+       line 252, "pan.___", state 1445, "(1)"
+       line 256, "pan.___", state 1457, "(1)"
+       line 260, "pan.___", state 1465, "(1)"
+       line 271, "pan.___", state 1496, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1505, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1518, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1543, "(1)"
+       line 252, "pan.___", state 1551, "(1)"
+       line 256, "pan.___", state 1563, "(1)"
+       line 260, "pan.___", state 1571, "(1)"
+       line 1236, "pan.___", state 1587, "-end-"
+       (103 of 1587 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1299, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 2.6e+03 seconds
+pan: rate 26856.415 states/second
+pan: avg transition delay 2.7296e-06 usec
+cp .input.spin urcu_free.spin.input
+cp .input.spin.trail urcu_free.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free.ltl b/formal-model/urcu-controldataflow-intel-ipi/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free.spin.input b/formal-model/urcu-controldataflow-intel-ipi/urcu_free.spin.input
new file mode 100644 (file)
index 0000000..66927ee
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
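+
+/*
+ * Illustrative sketch only (not part of the verified model): how the token
+ * macros above encode a two-instruction RAW dependency. The token names
+ * STEP_A/STEP_B and the flow variable are hypothetical, chosen just for this
+ * example; the reader/writer processes below use the same pattern with their
+ * own token names.
+ *
+ *	#define STEP_A	(1 << 0)
+ *	#define STEP_B	(1 << 1)
+ *
+ *	do
+ *	:: CONSUME_TOKENS(flow, 0, STEP_A) ->
+ *		... execute instruction A (once: STEP_A inhibits re-execution)
+ *		PRODUCE_TOKENS(flow, STEP_A);
+ *	:: CONSUME_TOKENS(flow, STEP_A, STEP_B) ->
+ *		... execute instruction B (RAW dependency on A)
+ *		PRODUCE_TOKENS(flow, STEP_B);
+ *	:: CONSUME_TOKENS(flow, STEP_A | STEP_B, 0) ->
+ *		CLEAR_TOKENS(flow, STEP_A | STEP_B);
+ *		break;
+ *	od;
+ */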
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can eliminate this dependency, but the dependency can still be required when
+ * writing multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
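+
+/*
+ * Minimal illustration of the dependency kinds above, on hypothetical
+ * statements S1..S3 (not part of the model):
+ *
+ *	S1:  a = x;
+ *	S2:  y = a + 1;	RAW on S1 (S2 reads "a" written by S1)
+ *	S3:  a = z;	WAR on S2 (S3 writes "a" read by S2),
+ *			WAW on S1 (S3 writes "a" written by S1)
+ */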
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Randomly perform the cache write-back or read-from-memory (subject to the
+ * dirty state), or do nothing, modeling caches being updated at arbitrary
+ * times.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
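+
+/*
+ * Sketch of how the cache model macros combine, for a hypothetical cached
+ * variable "foo" (illustration only; "foo" is not part of the model):
+ *
+ *	DECLARE_CACHED_VAR(byte, foo);
+ *
+ *	WRITE_CACHED_VAR(foo, 1);		writes the running process' local
+ *						copy and marks its line dirty
+ *	RANDOM_CACHE_WRITE_TO_MEM(foo, get_pid());	may flush the dirty line
+ *						to mem_foo, or do nothing
+ *	CACHE_READ_FROM_MEM(foo, get_pid());	refreshes the local copy from
+ *						mem_foo unless the line is dirty
+ *	tmp = READ_CACHED_VAR(foo);		always reads the local copy
+ */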
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into another loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it's blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second
+                        * read, which was performed as a prefetch. Note that
+                        * all instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds an implicit serialization of
+                * the WRITE_FREE instruction. Normally, it would be permitted
+                * to spill over into the next loop execution. Given that the
+                * validation checks whether the data entry read is poisoned,
+                * it is OK not to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops infinitely, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this init after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_nested.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.log b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.log
new file mode 100644 (file)
index 0000000..1984f88
--- /dev/null
@@ -0,0 +1,592 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_mb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    8619 States=    1e+06 Transitions=    8e+06 Memory=   550.432        t=   20.2 R=   5e+04
+Depth=    8619 States=    2e+06 Transitions=  1.8e+07 Memory=   634.318        t=   46.3 R=   4e+04
+Depth=    8619 States=    3e+06 Transitions= 2.53e+07 Memory=   718.303        t=   64.9 R=   5e+04
+pan: resizing hashtable to -w22..  done
+Depth=    8619 States=    4e+06 Transitions= 3.53e+07 Memory=   833.311        t=   91.4 R=   4e+04
+Depth=    8619 States=    5e+06 Transitions= 5.71e+07 Memory=   917.295        t=    152 R=   3e+04
+Depth=    8619 States=    6e+06 Transitions= 6.81e+07 Memory=  1001.279        t=    181 R=   3e+04
+Depth=    8619 States=    7e+06 Transitions= 8.02e+07 Memory=  1085.264        t=    214 R=   3e+04
+Depth=    8619 States=    8e+06 Transitions=  8.9e+07 Memory=  1169.151        t=    238 R=   3e+04
+pan: claim violated! (at depth 1359)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 8619, errors: 1
+  8162162 states, stored
+ 81941434 states, matched
+ 90103596 transitions (= stored+matched)
+1.3256628e+09 atomic steps
+hash conflicts:  60101961 (resolved)
+
+Stats on memory usage (in Megabytes):
+  902.949      equivalent memory usage for states (stored*(State-vector + overhead))
+  693.526      actual memory usage for states (compression: 76.81%)
+               state-vector as stored = 61 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+ 1182.822      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 272, "pan.___", state 34, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 56, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 65, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 81, "(1)"
+       line 253, "pan.___", state 89, "(1)"
+       line 257, "pan.___", state 101, "(1)"
+       line 261, "pan.___", state 109, "(1)"
+       line 411, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 199, "(1)"
+       line 257, "pan.___", state 219, "(1)"
+       line 261, "pan.___", state 227, "(1)"
+       line 700, "pan.___", state 246, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 253, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 285, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 299, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 317, "(1)"
+       line 257, "pan.___", state 337, "(1)"
+       line 261, "pan.___", state 345, "(1)"
+       line 411, "pan.___", state 364, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 396, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 410, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 428, "(1)"
+       line 257, "pan.___", state 448, "(1)"
+       line 261, "pan.___", state 456, "(1)"
+       line 411, "pan.___", state 477, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 479, "(1)"
+       line 411, "pan.___", state 480, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 480, "else"
+       line 411, "pan.___", state 483, "(1)"
+       line 415, "pan.___", state 491, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 493, "(1)"
+       line 415, "pan.___", state 494, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 494, "else"
+       line 415, "pan.___", state 497, "(1)"
+       line 415, "pan.___", state 498, "(1)"
+       line 415, "pan.___", state 498, "(1)"
+       line 413, "pan.___", state 503, "((i<1))"
+       line 413, "pan.___", state 503, "((i>=1))"
+       line 420, "pan.___", state 509, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 511, "(1)"
+       line 420, "pan.___", state 512, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 512, "else"
+       line 420, "pan.___", state 515, "(1)"
+       line 420, "pan.___", state 516, "(1)"
+       line 420, "pan.___", state 516, "(1)"
+       line 424, "pan.___", state 523, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 525, "(1)"
+       line 424, "pan.___", state 526, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 526, "else"
+       line 424, "pan.___", state 529, "(1)"
+       line 424, "pan.___", state 530, "(1)"
+       line 424, "pan.___", state 530, "(1)"
+       line 422, "pan.___", state 535, "((i<2))"
+       line 422, "pan.___", state 535, "((i>=2))"
+       line 249, "pan.___", state 541, "(1)"
+       line 253, "pan.___", state 549, "(1)"
+       line 253, "pan.___", state 550, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 550, "else"
+       line 251, "pan.___", state 555, "((i<1))"
+       line 251, "pan.___", state 555, "((i>=1))"
+       line 257, "pan.___", state 561, "(1)"
+       line 257, "pan.___", state 562, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 562, "else"
+       line 261, "pan.___", state 569, "(1)"
+       line 261, "pan.___", state 570, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 570, "else"
+       line 259, "pan.___", state 575, "((i<2))"
+       line 259, "pan.___", state 575, "((i>=2))"
+       line 266, "pan.___", state 579, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 579, "else"
+       line 431, "pan.___", state 581, "(1)"
+       line 431, "pan.___", state 581, "(1)"
+       line 700, "pan.___", state 584, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 700, "pan.___", state 585, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 700, "pan.___", state 586, "(1)"
+       line 411, "pan.___", state 593, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 625, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 639, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 657, "(1)"
+       line 257, "pan.___", state 677, "(1)"
+       line 261, "pan.___", state 685, "(1)"
+       line 411, "pan.___", state 711, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 743, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 757, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 775, "(1)"
+       line 257, "pan.___", state 795, "(1)"
+       line 261, "pan.___", state 803, "(1)"
+       line 411, "pan.___", state 822, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 824, "(1)"
+       line 411, "pan.___", state 825, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 825, "else"
+       line 411, "pan.___", state 828, "(1)"
+       line 415, "pan.___", state 836, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 838, "(1)"
+       line 415, "pan.___", state 839, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 839, "else"
+       line 415, "pan.___", state 842, "(1)"
+       line 415, "pan.___", state 843, "(1)"
+       line 415, "pan.___", state 843, "(1)"
+       line 413, "pan.___", state 848, "((i<1))"
+       line 413, "pan.___", state 848, "((i>=1))"
+       line 420, "pan.___", state 854, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 856, "(1)"
+       line 420, "pan.___", state 857, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 857, "else"
+       line 420, "pan.___", state 860, "(1)"
+       line 420, "pan.___", state 861, "(1)"
+       line 420, "pan.___", state 861, "(1)"
+       line 424, "pan.___", state 868, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 870, "(1)"
+       line 424, "pan.___", state 871, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 871, "else"
+       line 424, "pan.___", state 874, "(1)"
+       line 424, "pan.___", state 875, "(1)"
+       line 424, "pan.___", state 875, "(1)"
+       line 422, "pan.___", state 880, "((i<2))"
+       line 422, "pan.___", state 880, "((i>=2))"
+       line 249, "pan.___", state 886, "(1)"
+       line 253, "pan.___", state 894, "(1)"
+       line 253, "pan.___", state 895, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 895, "else"
+       line 251, "pan.___", state 900, "((i<1))"
+       line 251, "pan.___", state 900, "((i>=1))"
+       line 257, "pan.___", state 906, "(1)"
+       line 257, "pan.___", state 907, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 907, "else"
+       line 261, "pan.___", state 914, "(1)"
+       line 261, "pan.___", state 915, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 915, "else"
+       line 259, "pan.___", state 920, "((i<2))"
+       line 259, "pan.___", state 920, "((i>=2))"
+       line 266, "pan.___", state 924, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 924, "else"
+       line 431, "pan.___", state 926, "(1)"
+       line 431, "pan.___", state 926, "(1)"
+       line 708, "pan.___", state 930, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 935, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 967, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 981, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 999, "(1)"
+       line 257, "pan.___", state 1019, "(1)"
+       line 261, "pan.___", state 1027, "(1)"
+       line 411, "pan.___", state 1049, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1081, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1095, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1113, "(1)"
+       line 257, "pan.___", state 1133, "(1)"
+       line 261, "pan.___", state 1141, "(1)"
+       line 411, "pan.___", state 1164, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1196, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1210, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1228, "(1)"
+       line 257, "pan.___", state 1248, "(1)"
+       line 261, "pan.___", state 1256, "(1)"
+       line 411, "pan.___", state 1275, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1307, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1321, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1339, "(1)"
+       line 257, "pan.___", state 1359, "(1)"
+       line 261, "pan.___", state 1367, "(1)"
+       line 411, "pan.___", state 1391, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1423, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1437, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1455, "(1)"
+       line 257, "pan.___", state 1475, "(1)"
+       line 261, "pan.___", state 1483, "(1)"
+       line 411, "pan.___", state 1502, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1534, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1548, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1566, "(1)"
+       line 257, "pan.___", state 1586, "(1)"
+       line 261, "pan.___", state 1594, "(1)"
+       line 411, "pan.___", state 1616, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1648, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1662, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1680, "(1)"
+       line 257, "pan.___", state 1700, "(1)"
+       line 261, "pan.___", state 1708, "(1)"
+       line 747, "pan.___", state 1727, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1734, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1766, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1780, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1798, "(1)"
+       line 257, "pan.___", state 1818, "(1)"
+       line 261, "pan.___", state 1826, "(1)"
+       line 411, "pan.___", state 1845, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1877, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1891, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1909, "(1)"
+       line 257, "pan.___", state 1929, "(1)"
+       line 261, "pan.___", state 1937, "(1)"
+       line 411, "pan.___", state 1958, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1960, "(1)"
+       line 411, "pan.___", state 1961, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 1961, "else"
+       line 411, "pan.___", state 1964, "(1)"
+       line 415, "pan.___", state 1972, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1974, "(1)"
+       line 415, "pan.___", state 1975, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 1975, "else"
+       line 415, "pan.___", state 1978, "(1)"
+       line 415, "pan.___", state 1979, "(1)"
+       line 415, "pan.___", state 1979, "(1)"
+       line 413, "pan.___", state 1984, "((i<1))"
+       line 413, "pan.___", state 1984, "((i>=1))"
+       line 420, "pan.___", state 1990, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1992, "(1)"
+       line 420, "pan.___", state 1993, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 1993, "else"
+       line 420, "pan.___", state 1996, "(1)"
+       line 420, "pan.___", state 1997, "(1)"
+       line 420, "pan.___", state 1997, "(1)"
+       line 424, "pan.___", state 2004, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2006, "(1)"
+       line 424, "pan.___", state 2007, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2007, "else"
+       line 424, "pan.___", state 2010, "(1)"
+       line 424, "pan.___", state 2011, "(1)"
+       line 424, "pan.___", state 2011, "(1)"
+       line 422, "pan.___", state 2016, "((i<2))"
+       line 422, "pan.___", state 2016, "((i>=2))"
+       line 249, "pan.___", state 2022, "(1)"
+       line 253, "pan.___", state 2030, "(1)"
+       line 253, "pan.___", state 2031, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2031, "else"
+       line 251, "pan.___", state 2036, "((i<1))"
+       line 251, "pan.___", state 2036, "((i>=1))"
+       line 257, "pan.___", state 2042, "(1)"
+       line 257, "pan.___", state 2043, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2043, "else"
+       line 261, "pan.___", state 2050, "(1)"
+       line 261, "pan.___", state 2051, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2051, "else"
+       line 259, "pan.___", state 2056, "((i<2))"
+       line 259, "pan.___", state 2056, "((i>=2))"
+       line 266, "pan.___", state 2060, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2060, "else"
+       line 431, "pan.___", state 2062, "(1)"
+       line 431, "pan.___", state 2062, "(1)"
+       line 747, "pan.___", state 2065, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 747, "pan.___", state 2066, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 747, "pan.___", state 2067, "(1)"
+       line 411, "pan.___", state 2074, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2106, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2120, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2138, "(1)"
+       line 257, "pan.___", state 2158, "(1)"
+       line 261, "pan.___", state 2166, "(1)"
+       line 411, "pan.___", state 2191, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2223, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2237, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2255, "(1)"
+       line 257, "pan.___", state 2275, "(1)"
+       line 261, "pan.___", state 2283, "(1)"
+       line 411, "pan.___", state 2302, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2334, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2348, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2366, "(1)"
+       line 257, "pan.___", state 2386, "(1)"
+       line 261, "pan.___", state 2394, "(1)"
+       line 249, "pan.___", state 2425, "(1)"
+       line 257, "pan.___", state 2445, "(1)"
+       line 261, "pan.___", state 2453, "(1)"
+       line 249, "pan.___", state 2468, "(1)"
+       line 257, "pan.___", state 2488, "(1)"
+       line 261, "pan.___", state 2496, "(1)"
+       line 898, "pan.___", state 2513, "-end-"
+       (221 of 2513 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 20, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 26, "(1)"
+       line 415, "pan.___", state 34, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 40, "(1)"
+       line 415, "pan.___", state 41, "(1)"
+       line 415, "pan.___", state 41, "(1)"
+       line 413, "pan.___", state 46, "((i<1))"
+       line 413, "pan.___", state 46, "((i>=1))"
+       line 420, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 58, "(1)"
+       line 420, "pan.___", state 59, "(1)"
+       line 420, "pan.___", state 59, "(1)"
+       line 424, "pan.___", state 72, "(1)"
+       line 424, "pan.___", state 73, "(1)"
+       line 424, "pan.___", state 73, "(1)"
+       line 422, "pan.___", state 78, "((i<2))"
+       line 422, "pan.___", state 78, "((i>=2))"
+       line 249, "pan.___", state 84, "(1)"
+       line 253, "pan.___", state 92, "(1)"
+       line 253, "pan.___", state 93, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 93, "else"
+       line 251, "pan.___", state 98, "((i<1))"
+       line 251, "pan.___", state 98, "((i>=1))"
+       line 257, "pan.___", state 104, "(1)"
+       line 257, "pan.___", state 105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 105, "else"
+       line 261, "pan.___", state 112, "(1)"
+       line 261, "pan.___", state 113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 113, "else"
+       line 259, "pan.___", state 118, "((i<2))"
+       line 259, "pan.___", state 118, "((i>=2))"
+       line 266, "pan.___", state 122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 122, "else"
+       line 431, "pan.___", state 124, "(1)"
+       line 431, "pan.___", state 124, "(1)"
+       line 272, "pan.___", state 133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 142, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 150, "((i<1))"
+       line 274, "pan.___", state 150, "((i>=1))"
+       line 280, "pan.___", state 155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 1021, "pan.___", state 183, "old_data = cached_rcu_ptr.val[_pid]"
+       line 1032, "pan.___", state 187, "_proc_urcu_writer = (_proc_urcu_writer|(1<<4))"
+       line 411, "pan.___", state 195, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 201, "(1)"
+       line 415, "pan.___", state 209, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 215, "(1)"
+       line 415, "pan.___", state 216, "(1)"
+       line 415, "pan.___", state 216, "(1)"
+       line 420, "pan.___", state 229, "(1)"
+       line 424, "pan.___", state 241, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 259, "(1)"
+       line 253, "pan.___", state 267, "(1)"
+       line 261, "pan.___", state 287, "(1)"
+       line 431, "pan.___", state 299, "(1)"
+       line 431, "pan.___", state 299, "(1)"
+       line 415, "pan.___", state 322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 380, "(1)"
+       line 261, "pan.___", state 400, "(1)"
+       line 415, "pan.___", state 443, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 501, "(1)"
+       line 415, "pan.___", state 554, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 612, "(1)"
+       line 415, "pan.___", state 667, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 699, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 725, "(1)"
+       line 261, "pan.___", state 745, "(1)"
+       line 1168, "pan.___", state 770, "_proc_urcu_writer = (_proc_urcu_writer|(1<<13))"
+       line 272, "pan.___", state 798, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 800, "(1)"
+       line 276, "pan.___", state 807, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 809, "(1)"
+       line 276, "pan.___", state 810, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 810, "else"
+       line 274, "pan.___", state 815, "((i<1))"
+       line 274, "pan.___", state 815, "((i>=1))"
+       line 280, "pan.___", state 820, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 822, "(1)"
+       line 280, "pan.___", state 823, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 823, "else"
+       line 284, "pan.___", state 829, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 831, "(1)"
+       line 284, "pan.___", state 832, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 832, "else"
+       line 282, "pan.___", state 837, "((i<2))"
+       line 282, "pan.___", state 837, "((i>=2))"
+       line 249, "pan.___", state 845, "(1)"
+       line 253, "pan.___", state 853, "(1)"
+       line 253, "pan.___", state 854, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 854, "else"
+       line 251, "pan.___", state 859, "((i<1))"
+       line 251, "pan.___", state 859, "((i>=1))"
+       line 257, "pan.___", state 865, "(1)"
+       line 257, "pan.___", state 866, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 866, "else"
+       line 261, "pan.___", state 873, "(1)"
+       line 261, "pan.___", state 874, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 874, "else"
+       line 266, "pan.___", state 883, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 883, "else"
+       line 1222, "pan.___", state 899, "((i<1))"
+       line 1222, "pan.___", state 899, "((i>=1))"
+       line 272, "pan.___", state 904, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 906, "(1)"
+       line 276, "pan.___", state 913, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 915, "(1)"
+       line 276, "pan.___", state 916, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 916, "else"
+       line 274, "pan.___", state 921, "((i<1))"
+       line 274, "pan.___", state 921, "((i>=1))"
+       line 280, "pan.___", state 926, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 928, "(1)"
+       line 280, "pan.___", state 929, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 929, "else"
+       line 284, "pan.___", state 935, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 937, "(1)"
+       line 284, "pan.___", state 938, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 938, "else"
+       line 282, "pan.___", state 943, "((i<2))"
+       line 282, "pan.___", state 943, "((i>=2))"
+       line 249, "pan.___", state 951, "(1)"
+       line 253, "pan.___", state 959, "(1)"
+       line 253, "pan.___", state 960, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 960, "else"
+       line 251, "pan.___", state 965, "((i<1))"
+       line 251, "pan.___", state 965, "((i>=1))"
+       line 257, "pan.___", state 971, "(1)"
+       line 257, "pan.___", state 972, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 972, "else"
+       line 261, "pan.___", state 979, "(1)"
+       line 261, "pan.___", state 980, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 980, "else"
+       line 266, "pan.___", state 989, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 989, "else"
+       line 299, "pan.___", state 991, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 991, "else"
+       line 1222, "pan.___", state 992, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 1222, "pan.___", state 992, "else"
+       line 276, "pan.___", state 1005, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1018, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1027, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1043, "(1)"
+       line 253, "pan.___", state 1051, "(1)"
+       line 257, "pan.___", state 1063, "(1)"
+       line 261, "pan.___", state 1071, "(1)"
+       line 272, "pan.___", state 1102, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1111, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1124, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1133, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1149, "(1)"
+       line 253, "pan.___", state 1157, "(1)"
+       line 257, "pan.___", state 1169, "(1)"
+       line 261, "pan.___", state 1177, "(1)"
+       line 276, "pan.___", state 1203, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1225, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1241, "(1)"
+       line 253, "pan.___", state 1249, "(1)"
+       line 257, "pan.___", state 1261, "(1)"
+       line 261, "pan.___", state 1269, "(1)"
+       line 272, "pan.___", state 1300, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1309, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1322, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1331, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1347, "(1)"
+       line 253, "pan.___", state 1355, "(1)"
+       line 257, "pan.___", state 1367, "(1)"
+       line 261, "pan.___", state 1375, "(1)"
+       line 272, "pan.___", state 1392, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1394, "(1)"
+       line 276, "pan.___", state 1401, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1403, "(1)"
+       line 276, "pan.___", state 1404, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1404, "else"
+       line 274, "pan.___", state 1409, "((i<1))"
+       line 274, "pan.___", state 1409, "((i>=1))"
+       line 280, "pan.___", state 1414, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1416, "(1)"
+       line 280, "pan.___", state 1417, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1417, "else"
+       line 284, "pan.___", state 1423, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1425, "(1)"
+       line 284, "pan.___", state 1426, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1426, "else"
+       line 282, "pan.___", state 1431, "((i<2))"
+       line 282, "pan.___", state 1431, "((i>=2))"
+       line 249, "pan.___", state 1439, "(1)"
+       line 253, "pan.___", state 1447, "(1)"
+       line 253, "pan.___", state 1448, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1448, "else"
+       line 251, "pan.___", state 1453, "((i<1))"
+       line 251, "pan.___", state 1453, "((i>=1))"
+       line 257, "pan.___", state 1459, "(1)"
+       line 257, "pan.___", state 1460, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1460, "else"
+       line 261, "pan.___", state 1467, "(1)"
+       line 261, "pan.___", state 1468, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1468, "else"
+       line 266, "pan.___", state 1477, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1477, "else"
+       line 1233, "pan.___", state 1480, "i = 0"
+       line 1233, "pan.___", state 1482, "reader_barrier = 1"
+       line 1233, "pan.___", state 1493, "((i<1))"
+       line 1233, "pan.___", state 1493, "((i>=1))"
+       line 272, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1500, "(1)"
+       line 276, "pan.___", state 1507, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1509, "(1)"
+       line 276, "pan.___", state 1510, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1510, "else"
+       line 274, "pan.___", state 1515, "((i<1))"
+       line 274, "pan.___", state 1515, "((i>=1))"
+       line 280, "pan.___", state 1520, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1522, "(1)"
+       line 280, "pan.___", state 1523, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1523, "else"
+       line 284, "pan.___", state 1529, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1531, "(1)"
+       line 284, "pan.___", state 1532, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1532, "else"
+       line 282, "pan.___", state 1537, "((i<2))"
+       line 282, "pan.___", state 1537, "((i>=2))"
+       line 249, "pan.___", state 1545, "(1)"
+       line 253, "pan.___", state 1553, "(1)"
+       line 253, "pan.___", state 1554, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1554, "else"
+       line 251, "pan.___", state 1559, "((i<1))"
+       line 251, "pan.___", state 1559, "((i>=1))"
+       line 257, "pan.___", state 1565, "(1)"
+       line 257, "pan.___", state 1566, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1566, "else"
+       line 261, "pan.___", state 1573, "(1)"
+       line 261, "pan.___", state 1574, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1574, "else"
+       line 266, "pan.___", state 1583, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1583, "else"
+       line 299, "pan.___", state 1585, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1585, "else"
+       line 1233, "pan.___", state 1586, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 1233, "pan.___", state 1586, "else"
+       line 1237, "pan.___", state 1589, "-end-"
+       (179 of 1589 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 241 seconds
+pan: rate 33888.985 states/second
+pan: avg transition delay 2.673e-06 usec
+cp .input.spin urcu_free_no_mb.spin.input
+cp .input.spin.trail urcu_free_no_mb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.spin.input b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.spin.input
new file mode 100644 (file)
index 0000000..ddb4112
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_MB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable
+ * to save state space. The bits are used as triggers to execute the
+ * instructions having those variables as input. Leaving bits active inhibits
+ * instruction execution. This scheme is used to make instruction disabling
+ * and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
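+
+/*
+ * Illustrative sketch (editorial addition, not part of the model): how the
+ * token macros above compose. The names "flow", "TOK_A" and "TOK_B" are
+ * hypothetical. An instruction guarded by CONSUME_TOKENS(flow, TOK_A, TOK_B)
+ * only executes once TOK_A has been produced and as long as TOK_B has not:
+ *
+ *     #define TOK_A   (1 << 0)
+ *     #define TOK_B   (1 << 1)
+ *
+ *     byte flow = 0;
+ *
+ *     PRODUCE_TOKENS(flow, TOK_A);            // mark the first step as done
+ *     if
+ *     :: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->
+ *             skip;                           // instruction body
+ *             PRODUCE_TOKENS(flow, TOK_B);    // mark the second step as done
+ *     fi;
+ *     CLEAR_TOKENS(flow, TOK_A | TOK_B);      // reset before the next loop
+ */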
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it remains when multiple writes
+ * must target the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction
+ * evaluating in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with the arcs of the data flow inverted and input
+ * vs output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a depencency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
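+
+/*
+ * Worked example of the dependency types above (illustration only, using
+ * hypothetical variables a, b and c) :
+ *
+ *   (1) a = b + 1;
+ *   (2) c = a;      RAW on "a" with (1) : (2) reads the value written by (1)
+ *   (3) b = 42;     WAR on "b" with (1) : (1) must read "b" before (3) writes it
+ *   (4) c = 7;      WAW on "c" with (2) : (4) must write "c" after (2) does
+ *
+ * In this model, such dependencies are expressed by making the dependent
+ * instruction consume the token produced by the instruction it depends on.
+ */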
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops end or
+ * start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write-back and refresh,
+ * respectively); smp_mb forces both. A usage illustration follows the cache
+ * macros below.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not (random choice) propagate a dirty cache line to memory,
+ * thereby possibly making it visible to the other processes' caches.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
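+
+/*
+ * Illustration (not part of the model) : a hypothetical cached variable "foo"
+ * would be handled roughly as follows. Each process reads and writes its own
+ * cache line, and the per-process dirty bit tracks pending write-backs.
+ *
+ *   DECLARE_CACHED_VAR(byte, foo);
+ *
+ *   in a proctype body :
+ *   WRITE_CACHED_VAR(foo, 1);              local cache write, marks line dirty
+ *   tmp = READ_CACHED_VAR(foo);            reads this process' cache only
+ *   CACHE_WRITE_TO_MEM(foo, get_pid());    write-back, clears the dirty bit
+ *   CACHE_READ_FROM_MEM(foo, get_pid());   refresh from memory if not dirty
+ *
+ * The smp_rmb/smp_wmb/smp_mb inlines below are modeled as forced refreshes
+ * and write-backs of all the cached variables of the model.
+ */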
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * (waiting for the reader and sending barrier requests) while
+                * the reader keeps servicing them without continuing its own
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
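+
+/*
+ * Worked example of the base arithmetic : with READ_LOCK_BASE == 1, the
+ * PROCEDURE_READ_LOCK body tokens land on bits 1 to 4 :
+ *   READ_PROD_A_READ << 1          == (1 << 1)
+ *   READ_PROD_B_IF_TRUE << 1       == (1 << 2)
+ *   READ_PROD_B_IF_FALSE << 1      == (1 << 3)
+ *   READ_PROD_C_IF_TRUE_READ << 1  == (1 << 4)
+ * and READ_LOCK_OUT (1 << 5) is the "producetoken" marking lock completion,
+ * hence the "1 to 5" range noted above.
+ */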
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
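+
+/*
+ * ((1 << 30) - 1) sets bits 0 to 29, i.e. every reader token defined above,
+ * including the intermediate branch bits that READ_PROC_ALL_TOKENS
+ * deliberately leaves out (the highest token used is READ_UNLOCK_OUT_UNROLL,
+ * bit 29).
+ */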
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute at points
+                * where the executed instructions appear in program order,
+                * i.e. when the produced tokens form a consecutive prefix of
+                * the reader's instructions.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of adding a branch in the
+                        * common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * there is a RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one loop
+        * from spilling its execution onto the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately; otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add nonexistent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill onto the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.spin.input.trail b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_mb.spin.input.trail
new file mode 100644 (file)
index 0000000..e886f80
--- /dev/null
@@ -0,0 +1,1362 @@
+-2:3:-2
+-4:-4:-4
+1:0:4182
+2:3:4102
+3:3:4105
+4:3:4105
+5:3:4108
+6:3:4116
+7:3:4116
+8:3:4119
+9:3:4125
+10:3:4129
+11:3:4129
+12:3:4132
+13:3:4142
+14:3:4150
+15:3:4150
+16:3:4153
+17:3:4159
+18:3:4163
+19:3:4163
+20:3:4166
+21:3:4172
+22:3:4176
+23:3:4177
+24:0:4182
+25:3:4179
+26:0:4182
+27:2:2515
+28:0:4182
+29:2:2521
+30:0:4182
+31:2:2522
+32:0:4182
+33:2:2524
+34:0:4182
+35:2:2525
+36:0:4182
+37:2:2526
+38:0:4182
+39:2:2527
+40:0:4182
+41:2:2528
+42:2:2529
+43:2:2533
+44:2:2534
+45:2:2542
+46:2:2543
+47:2:2547
+48:2:2548
+49:2:2556
+50:2:2561
+51:2:2565
+52:2:2566
+53:2:2574
+54:2:2575
+55:2:2579
+56:2:2580
+57:2:2574
+58:2:2575
+59:2:2579
+60:2:2580
+61:2:2588
+62:2:2593
+63:2:2600
+64:2:2601
+65:2:2608
+66:2:2613
+67:2:2620
+68:2:2621
+69:2:2620
+70:2:2621
+71:2:2628
+72:2:2638
+73:0:4182
+74:2:2527
+75:0:4182
+76:2:2642
+77:2:2646
+78:2:2647
+79:2:2651
+80:2:2655
+81:2:2656
+82:2:2660
+83:2:2668
+84:2:2669
+85:2:2673
+86:2:2677
+87:2:2678
+88:2:2673
+89:2:2674
+90:2:2682
+91:0:4182
+92:2:2527
+93:0:4182
+94:2:2690
+95:2:2691
+96:2:2692
+97:0:4182
+98:2:2527
+99:0:4182
+100:2:2700
+101:0:4182
+102:2:2527
+103:0:4182
+104:2:2703
+105:2:2704
+106:2:2708
+107:2:2709
+108:2:2717
+109:2:2718
+110:2:2722
+111:2:2723
+112:2:2731
+113:2:2736
+114:2:2737
+115:2:2749
+116:2:2750
+117:2:2754
+118:2:2755
+119:2:2749
+120:2:2750
+121:2:2754
+122:2:2755
+123:2:2763
+124:2:2768
+125:2:2775
+126:2:2776
+127:2:2783
+128:2:2788
+129:2:2795
+130:2:2796
+131:2:2795
+132:2:2796
+133:2:2803
+134:2:2812
+135:0:4182
+136:2:2527
+137:0:4182
+138:2:2816
+139:2:2817
+140:2:2818
+141:2:2830
+142:2:2831
+143:2:2835
+144:2:2836
+145:2:2844
+146:2:2849
+147:2:2853
+148:2:2854
+149:2:2862
+150:2:2863
+151:2:2867
+152:2:2868
+153:2:2862
+154:2:2863
+155:2:2867
+156:2:2868
+157:2:2876
+158:2:2881
+159:2:2888
+160:2:2889
+161:2:2896
+162:2:2901
+163:2:2908
+164:2:2909
+165:2:2908
+166:2:2909
+167:2:2916
+168:2:2929
+169:2:2930
+170:0:4182
+171:2:2527
+172:0:4182
+173:2:2937
+174:2:2938
+175:2:2942
+176:2:2943
+177:2:2951
+178:2:2952
+179:2:2956
+180:2:2957
+181:2:2965
+182:2:2970
+183:2:2974
+184:2:2975
+185:2:2983
+186:2:2984
+187:2:2988
+188:2:2989
+189:2:2983
+190:2:2984
+191:2:2988
+192:2:2989
+193:2:2997
+194:2:3002
+195:2:3009
+196:2:3010
+197:2:3017
+198:2:3022
+199:2:3029
+200:2:3030
+201:2:3029
+202:2:3030
+203:2:3037
+204:0:4182
+205:2:2527
+206:0:4182
+207:2:3048
+208:2:3049
+209:2:3053
+210:2:3054
+211:2:3062
+212:2:3063
+213:2:3067
+214:2:3068
+215:2:3076
+216:2:3081
+217:2:3085
+218:2:3086
+219:2:3094
+220:2:3095
+221:2:3099
+222:2:3100
+223:2:3094
+224:2:3095
+225:2:3099
+226:2:3100
+227:2:3108
+228:2:3113
+229:2:3120
+230:2:3121
+231:2:3128
+232:2:3133
+233:2:3140
+234:2:3141
+235:2:3140
+236:2:3141
+237:2:3148
+238:2:3157
+239:0:4182
+240:2:2527
+241:0:4182
+242:2:3161
+243:2:3162
+244:2:3163
+245:2:3175
+246:2:3176
+247:2:3180
+248:2:3181
+249:2:3189
+250:2:3194
+251:2:3198
+252:2:3199
+253:2:3207
+254:2:3208
+255:2:3212
+256:2:3213
+257:2:3207
+258:2:3208
+259:2:3212
+260:2:3213
+261:2:3221
+262:2:3226
+263:2:3233
+264:2:3234
+265:2:3241
+266:2:3246
+267:2:3253
+268:2:3254
+269:2:3253
+270:2:3254
+271:2:3261
+272:2:3273
+273:2:3274
+274:0:4182
+275:2:2527
+276:0:4182
+277:2:3283
+278:2:3284
+279:0:4182
+280:2:2527
+281:0:4182
+282:2:3288
+283:0:4182
+284:2:3296
+285:0:4182
+286:2:2522
+287:0:4182
+288:2:2524
+289:0:4182
+290:2:2525
+291:0:4182
+292:2:2526
+293:0:4182
+294:2:2527
+295:0:4182
+296:2:2528
+297:2:2529
+298:2:2533
+299:2:2534
+300:2:2542
+301:2:2543
+302:2:2547
+303:2:2548
+304:2:2556
+305:2:2561
+306:2:2565
+307:2:2566
+308:2:2574
+309:2:2575
+310:2:2576
+311:2:2574
+312:2:2575
+313:2:2579
+314:2:2580
+315:2:2588
+316:2:2593
+317:2:2600
+318:2:2601
+319:2:2608
+320:2:2613
+321:2:2620
+322:2:2621
+323:2:2620
+324:2:2621
+325:2:2628
+326:2:2638
+327:0:4182
+328:2:2527
+329:0:4182
+330:2:2642
+331:2:2646
+332:2:2647
+333:2:2651
+334:2:2655
+335:2:2656
+336:2:2660
+337:2:2668
+338:2:2669
+339:2:2673
+340:2:2674
+341:2:2673
+342:2:2677
+343:2:2678
+344:2:2682
+345:0:4182
+346:2:2527
+347:0:4182
+348:2:2690
+349:2:2691
+350:2:2692
+351:0:4182
+352:2:2527
+353:0:4182
+354:2:2700
+355:0:4182
+356:2:2527
+357:0:4182
+358:2:2703
+359:2:2704
+360:2:2708
+361:2:2709
+362:2:2717
+363:2:2718
+364:2:2722
+365:2:2723
+366:2:2731
+367:2:2736
+368:2:2737
+369:2:2749
+370:2:2750
+371:2:2754
+372:2:2755
+373:2:2749
+374:2:2750
+375:2:2754
+376:2:2755
+377:2:2763
+378:2:2768
+379:2:2775
+380:2:2776
+381:2:2783
+382:2:2788
+383:2:2795
+384:2:2796
+385:2:2795
+386:2:2796
+387:2:2803
+388:2:2812
+389:0:4182
+390:2:2527
+391:0:4182
+392:2:2816
+393:2:2817
+394:2:2818
+395:2:2830
+396:2:2831
+397:2:2835
+398:2:2836
+399:2:2844
+400:2:2849
+401:2:2853
+402:2:2854
+403:2:2862
+404:2:2863
+405:2:2867
+406:2:2868
+407:2:2862
+408:2:2863
+409:2:2867
+410:2:2868
+411:2:2876
+412:2:2881
+413:2:2888
+414:2:2889
+415:2:2896
+416:2:2901
+417:2:2908
+418:2:2909
+419:2:2908
+420:2:2909
+421:2:2916
+422:2:2929
+423:2:2930
+424:0:4182
+425:2:2527
+426:0:4182
+427:2:2937
+428:2:2938
+429:2:2942
+430:2:2943
+431:2:2951
+432:2:2952
+433:2:2956
+434:2:2957
+435:2:2965
+436:2:2970
+437:2:2974
+438:2:2975
+439:2:2983
+440:2:2984
+441:2:2988
+442:2:2989
+443:2:2983
+444:2:2984
+445:2:2988
+446:2:2989
+447:2:2997
+448:2:3002
+449:2:3009
+450:2:3010
+451:2:3017
+452:2:3022
+453:2:3029
+454:2:3030
+455:2:3029
+456:2:3030
+457:2:3037
+458:0:4182
+459:2:2527
+460:0:4182
+461:2:3048
+462:2:3049
+463:2:3053
+464:2:3054
+465:2:3062
+466:2:3063
+467:2:3067
+468:2:3068
+469:2:3076
+470:2:3081
+471:2:3085
+472:2:3086
+473:2:3094
+474:2:3095
+475:2:3099
+476:2:3100
+477:2:3094
+478:2:3095
+479:2:3099
+480:2:3100
+481:2:3108
+482:2:3113
+483:2:3120
+484:2:3121
+485:2:3128
+486:2:3133
+487:2:3140
+488:2:3141
+489:2:3140
+490:2:3141
+491:2:3148
+492:2:3157
+493:0:4182
+494:2:2527
+495:0:4182
+496:2:3161
+497:2:3162
+498:2:3163
+499:2:3175
+500:2:3176
+501:2:3180
+502:2:3181
+503:2:3189
+504:2:3194
+505:2:3198
+506:2:3199
+507:2:3207
+508:2:3208
+509:2:3212
+510:2:3213
+511:2:3207
+512:2:3208
+513:2:3212
+514:2:3213
+515:2:3221
+516:2:3226
+517:2:3233
+518:2:3234
+519:2:3241
+520:2:3246
+521:2:3253
+522:2:3254
+523:2:3253
+524:2:3254
+525:2:3261
+526:2:3273
+527:2:3274
+528:0:4182
+529:2:2527
+530:0:4182
+531:2:3283
+532:2:3284
+533:0:4182
+534:2:2527
+535:0:4182
+536:2:3288
+537:0:4182
+538:2:3296
+539:0:4182
+540:2:2522
+541:0:4182
+542:2:2524
+543:0:4182
+544:2:2525
+545:0:4182
+546:2:2526
+547:0:4182
+548:2:2527
+549:0:4182
+550:2:2528
+551:2:2529
+552:2:2533
+553:2:2534
+554:2:2542
+555:2:2543
+556:2:2547
+557:2:2548
+558:2:2556
+559:2:2561
+560:2:2565
+561:2:2566
+562:2:2574
+563:2:2575
+564:2:2579
+565:2:2580
+566:2:2574
+567:2:2575
+568:2:2576
+569:2:2588
+570:2:2593
+571:2:2600
+572:2:2601
+573:2:2608
+574:2:2613
+575:2:2620
+576:2:2621
+577:2:2620
+578:2:2621
+579:2:2628
+580:2:2638
+581:0:4182
+582:2:2527
+583:0:4182
+584:2:2642
+585:2:2646
+586:2:2647
+587:2:2651
+588:2:2655
+589:2:2656
+590:2:2660
+591:2:2668
+592:2:2669
+593:2:2673
+594:2:2677
+595:2:2678
+596:2:2673
+597:2:2674
+598:2:2682
+599:0:4182
+600:2:2527
+601:0:4182
+602:2:2690
+603:2:2691
+604:2:2692
+605:0:4182
+606:2:2527
+607:0:4182
+608:2:2700
+609:0:4182
+610:2:2527
+611:0:4182
+612:2:2703
+613:2:2704
+614:2:2708
+615:2:2709
+616:2:2717
+617:2:2718
+618:2:2722
+619:2:2723
+620:2:2731
+621:2:2744
+622:2:2745
+623:2:2749
+624:2:2750
+625:2:2754
+626:2:2755
+627:2:2749
+628:2:2750
+629:2:2754
+630:2:2755
+631:2:2763
+632:2:2768
+633:2:2775
+634:2:2776
+635:2:2783
+636:2:2790
+637:2:2791
+638:2:2795
+639:2:2796
+640:2:2795
+641:2:2796
+642:2:2803
+643:2:2812
+644:0:4182
+645:2:2527
+646:0:4182
+647:2:2816
+648:2:2817
+649:2:2818
+650:2:2830
+651:2:2831
+652:2:2835
+653:2:2836
+654:2:2844
+655:2:2857
+656:2:2858
+657:2:2862
+658:2:2863
+659:2:2867
+660:2:2868
+661:2:2862
+662:2:2863
+663:2:2867
+664:2:2868
+665:2:2876
+666:2:2881
+667:2:2888
+668:2:2889
+669:2:2896
+670:2:2903
+671:2:2904
+672:2:2908
+673:2:2909
+674:2:2908
+675:2:2909
+676:2:2916
+677:2:2929
+678:2:2930
+679:0:4182
+680:2:2527
+681:0:4182
+682:2:2937
+683:2:2938
+684:2:2942
+685:2:2943
+686:2:2951
+687:2:2952
+688:2:2956
+689:2:2957
+690:2:2965
+691:2:2978
+692:2:2979
+693:2:2983
+694:2:2984
+695:2:2988
+696:2:2989
+697:2:2983
+698:2:2984
+699:2:2988
+700:2:2989
+701:2:2997
+702:2:3002
+703:2:3009
+704:2:3010
+705:2:3017
+706:2:3024
+707:2:3025
+708:2:3029
+709:2:3030
+710:2:3029
+711:2:3030
+712:2:3037
+713:0:4182
+714:2:2527
+715:0:4182
+716:2:3161
+717:2:3162
+718:2:3166
+719:2:3167
+720:2:3175
+721:2:3176
+722:2:3180
+723:2:3181
+724:2:3189
+725:2:3202
+726:2:3203
+727:2:3207
+728:2:3208
+729:2:3212
+730:2:3213
+731:2:3207
+732:2:3208
+733:2:3212
+734:2:3213
+735:2:3221
+736:2:3226
+737:2:3233
+738:2:3234
+739:2:3241
+740:2:3248
+741:2:3249
+742:2:3253
+743:2:3254
+744:2:3253
+745:2:3254
+746:2:3261
+747:2:3273
+748:2:3274
+749:0:4182
+750:2:2527
+751:0:4182
+752:2:3283
+753:2:3284
+754:0:4182
+755:2:2527
+756:0:4182
+757:2:3048
+758:2:3049
+759:2:3053
+760:2:3054
+761:2:3062
+762:2:3063
+763:2:3067
+764:2:3068
+765:2:3076
+766:2:3089
+767:2:3090
+768:2:3094
+769:2:3095
+770:2:3096
+771:2:3094
+772:2:3095
+773:2:3099
+774:2:3100
+775:2:3108
+776:2:3113
+777:2:3120
+778:2:3121
+779:2:3128
+780:2:3135
+781:2:3136
+782:2:3140
+783:2:3141
+784:2:3140
+785:2:3141
+786:2:3148
+787:2:3157
+788:0:4182
+789:2:2527
+790:0:4182
+791:2:3288
+792:0:4182
+793:2:3296
+794:0:4182
+795:2:3297
+796:0:4182
+797:2:3302
+798:0:4182
+799:1:2
+800:0:4182
+801:2:3303
+802:0:4182
+803:1:8
+804:0:4182
+805:2:3302
+806:0:4182
+807:1:9
+808:0:4182
+809:2:3303
+810:0:4182
+811:1:10
+812:0:4182
+813:2:3302
+814:0:4182
+815:1:11
+816:0:4182
+817:2:3303
+818:0:4182
+819:1:12
+820:0:4182
+821:2:3302
+822:0:4182
+823:1:13
+824:0:4182
+825:2:3303
+826:0:4182
+827:1:14
+828:0:4182
+829:2:3302
+830:0:4182
+831:1:15
+832:0:4182
+833:2:3303
+834:0:4182
+835:1:16
+836:0:4182
+837:2:3302
+838:0:4182
+839:1:17
+840:0:4182
+841:2:3303
+842:0:4182
+843:1:18
+844:0:4182
+845:2:3302
+846:0:4182
+847:1:19
+848:0:4182
+849:2:3303
+850:0:4182
+851:1:20
+852:0:4182
+853:2:3302
+854:0:4182
+855:1:21
+856:0:4182
+857:2:3303
+858:0:4182
+859:1:122
+860:0:4182
+861:2:3302
+862:0:4182
+863:1:124
+864:0:4182
+865:2:3303
+866:0:4182
+867:1:23
+868:0:4182
+869:2:3302
+870:0:4182
+871:1:130
+872:1:131
+873:1:135
+874:1:136
+875:1:144
+876:1:145
+877:1:149
+878:1:150
+879:1:158
+880:1:163
+881:1:167
+882:1:168
+883:1:176
+884:1:177
+885:1:181
+886:1:182
+887:1:176
+888:1:177
+889:1:181
+890:1:182
+891:1:190
+892:1:195
+893:1:202
+894:1:203
+895:1:210
+896:1:215
+897:1:222
+898:1:223
+899:1:222
+900:1:223
+901:1:230
+902:0:4182
+903:2:3303
+904:0:4182
+905:1:19
+906:0:4182
+907:2:3302
+908:0:4182
+909:1:20
+910:0:4182
+911:2:3303
+912:0:4182
+913:1:21
+914:0:4182
+915:2:3302
+916:0:4182
+917:1:122
+918:0:4182
+919:2:3303
+920:0:4182
+921:1:124
+922:0:4182
+923:2:3302
+924:0:4182
+925:1:23
+926:0:4182
+927:2:3303
+928:0:4182
+929:1:241
+930:1:242
+931:0:4182
+932:2:3302
+933:0:4182
+934:1:19
+935:0:4182
+936:2:3303
+937:0:4182
+938:1:20
+939:0:4182
+940:2:3302
+941:0:4182
+942:1:21
+943:0:4182
+944:2:3303
+945:0:4182
+946:1:122
+947:0:4182
+948:2:3302
+949:0:4182
+950:1:124
+951:0:4182
+952:2:3303
+953:0:4182
+954:1:23
+955:0:4182
+956:2:3302
+957:0:4182
+958:1:248
+959:1:249
+960:1:253
+961:1:254
+962:1:262
+963:1:263
+964:1:267
+965:1:268
+966:1:276
+967:1:281
+968:1:285
+969:1:286
+970:1:294
+971:1:295
+972:1:299
+973:1:300
+974:1:294
+975:1:295
+976:1:299
+977:1:300
+978:1:308
+979:1:313
+980:1:320
+981:1:321
+982:1:328
+983:1:333
+984:1:340
+985:1:341
+986:1:340
+987:1:341
+988:1:348
+989:0:4182
+990:2:3303
+991:0:4182
+992:1:19
+993:0:4182
+994:2:3302
+995:0:4182
+996:1:20
+997:0:4182
+998:2:3303
+999:0:4182
+1000:1:21
+1001:0:4182
+1002:2:3302
+1003:0:4182
+1004:1:122
+1005:0:4182
+1006:2:3303
+1007:0:4182
+1008:1:124
+1009:0:4182
+1010:2:3302
+1011:0:4182
+1012:1:23
+1013:0:4182
+1014:2:3303
+1015:0:4182
+1016:1:359
+1017:1:360
+1018:1:364
+1019:1:365
+1020:1:373
+1021:1:374
+1022:1:378
+1023:1:379
+1024:1:387
+1025:1:392
+1026:1:396
+1027:1:397
+1028:1:405
+1029:1:406
+1030:1:410
+1031:1:411
+1032:1:405
+1033:1:406
+1034:1:410
+1035:1:411
+1036:1:419
+1037:1:424
+1038:1:431
+1039:1:432
+1040:1:439
+1041:1:444
+1042:1:451
+1043:1:452
+1044:1:451
+1045:1:452
+1046:1:459
+1047:1:468
+1048:0:4182
+1049:2:3302
+1050:0:4182
+1051:1:19
+1052:0:4182
+1053:2:3303
+1054:0:4182
+1055:1:20
+1056:0:4182
+1057:2:3302
+1058:0:4182
+1059:1:21
+1060:0:4182
+1061:2:3303
+1062:0:4182
+1063:1:122
+1064:0:4182
+1065:2:3302
+1066:0:4182
+1067:1:124
+1068:0:4182
+1069:2:3303
+1070:0:4182
+1071:1:23
+1072:0:4182
+1073:2:3302
+1074:0:4182
+1075:1:588
+1076:1:589
+1077:1:593
+1078:1:594
+1079:1:602
+1080:1:603
+1081:1:604
+1082:1:616
+1083:1:621
+1084:1:625
+1085:1:626
+1086:1:634
+1087:1:635
+1088:1:639
+1089:1:640
+1090:1:634
+1091:1:635
+1092:1:639
+1093:1:640
+1094:1:648
+1095:1:653
+1096:1:660
+1097:1:661
+1098:1:668
+1099:1:673
+1100:1:680
+1101:1:681
+1102:1:680
+1103:1:681
+1104:1:688
+1105:0:4182
+1106:2:3303
+1107:0:4182
+1108:1:19
+1109:0:4182
+1110:2:3302
+1111:0:4182
+1112:1:20
+1113:0:4182
+1114:2:3303
+1115:0:4182
+1116:1:21
+1117:0:4182
+1118:2:3302
+1119:0:4182
+1120:1:122
+1121:0:4182
+1122:2:3303
+1123:0:4182
+1124:1:124
+1125:0:4182
+1126:2:3302
+1127:0:4182
+1128:1:23
+1129:0:4182
+1130:2:3303
+1131:0:4182
+1132:1:699
+1133:1:702
+1134:1:703
+1135:0:4182
+1136:2:3302
+1137:0:4182
+1138:1:19
+1139:0:4182
+1140:2:3303
+1141:0:4182
+1142:1:20
+1143:0:4182
+1144:2:3302
+1145:0:4182
+1146:1:21
+1147:0:4182
+1148:2:3303
+1149:0:4182
+1150:1:122
+1151:0:4182
+1152:2:3302
+1153:0:4182
+1154:1:124
+1155:0:4182
+1156:2:3303
+1157:0:4182
+1158:1:23
+1159:0:4182
+1160:2:3302
+1161:0:4182
+1162:1:706
+1163:1:707
+1164:1:711
+1165:1:712
+1166:1:720
+1167:1:721
+1168:1:725
+1169:1:726
+1170:1:734
+1171:1:739
+1172:1:743
+1173:1:744
+1174:1:752
+1175:1:753
+1176:1:757
+1177:1:758
+1178:1:752
+1179:1:753
+1180:1:757
+1181:1:758
+1182:1:766
+1183:1:771
+1184:1:778
+1185:1:779
+1186:1:786
+1187:1:791
+1188:1:798
+1189:1:799
+1190:1:798
+1191:1:799
+1192:1:806
+1193:0:4182
+1194:2:3303
+1195:0:4182
+1196:1:19
+1197:0:4182
+1198:2:3302
+1199:0:4182
+1200:1:20
+1201:0:4182
+1202:2:3303
+1203:0:4182
+1204:1:21
+1205:0:4182
+1206:2:3302
+1207:0:4182
+1208:1:122
+1209:0:4182
+1210:2:3303
+1211:0:4182
+1212:1:124
+1213:0:4182
+1214:2:3302
+1215:0:4182
+1216:1:23
+1217:0:4182
+1218:2:3303
+1219:0:4182
+1220:1:930
+1221:1:931
+1222:1:935
+1223:1:936
+1224:1:944
+1225:1:945
+1226:1:949
+1227:1:950
+1228:1:958
+1229:1:963
+1230:1:967
+1231:1:968
+1232:1:976
+1233:1:977
+1234:1:981
+1235:1:982
+1236:1:976
+1237:1:977
+1238:1:981
+1239:1:982
+1240:1:990
+1241:1:995
+1242:1:1002
+1243:1:1003
+1244:1:1010
+1245:1:1015
+1246:1:1022
+1247:1:1023
+1248:1:1022
+1249:1:1023
+1250:1:1030
+1251:1:1039
+1252:1:1043
+1253:0:4182
+1254:2:3302
+1255:0:4182
+1256:1:19
+1257:0:4182
+1258:2:3303
+1259:0:4182
+1260:1:20
+1261:0:4182
+1262:2:3302
+1263:0:4182
+1264:1:21
+1265:0:4182
+1266:2:3303
+1267:0:4182
+1268:1:122
+1269:0:4182
+1270:2:3302
+1271:0:4182
+1272:1:124
+1273:0:4182
+1274:2:3303
+1275:0:4182
+1276:1:23
+1277:0:4182
+1278:2:3302
+1279:0:4182
+1280:1:1044
+1281:1:1045
+1282:1:1049
+1283:1:1050
+1284:1:1058
+1285:1:1059
+1286:1:1060
+1287:1:1072
+1288:1:1077
+1289:1:1081
+1290:1:1082
+1291:1:1090
+1292:1:1091
+1293:1:1095
+1294:1:1096
+1295:1:1090
+1296:1:1091
+1297:1:1095
+1298:1:1096
+1299:1:1104
+1300:1:1109
+1301:1:1116
+1302:1:1117
+1303:1:1124
+1304:1:1129
+1305:1:1136
+1306:1:1137
+1307:1:1136
+1308:1:1137
+1309:1:1144
+1310:0:4182
+1311:2:3303
+1312:0:4182
+1313:1:19
+1314:0:4182
+1315:2:3302
+1316:0:4182
+1317:1:20
+1318:0:4182
+1319:2:3303
+1320:0:4182
+1321:1:21
+1322:0:4182
+1323:2:3302
+1324:0:4182
+1325:1:122
+1326:0:4182
+1327:2:3303
+1328:0:4182
+1329:1:124
+1330:0:4182
+1331:2:3302
+1332:0:4182
+1333:1:23
+1334:0:4182
+1335:2:3303
+1336:0:4182
+1337:1:1155
+1338:0:4182
+1339:2:3302
+1340:0:4182
+1341:1:2421
+1342:1:2428
+1343:1:2429
+1344:1:2436
+1345:1:2441
+1346:1:2448
+1347:1:2449
+1348:1:2448
+1349:1:2449
+1350:1:2456
+1351:1:2460
+1352:0:4182
+1353:2:3303
+1354:0:4182
+1355:1:1157
+1356:1:1158
+1357:0:4180
+1358:2:3302
+1359:0:4186
+1360:1:2169
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.log b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.log
new file mode 100644 (file)
index 0000000..42698f6
--- /dev/null
@@ -0,0 +1,510 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_rmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    9420 States=    1e+06 Transitions= 7.12e+06 Memory=   550.432        t=   17.7 R=   6e+04
+Depth=    9420 States=    2e+06 Transitions= 1.53e+07 Memory=   634.318        t=   38.9 R=   5e+04
+Depth=    9420 States=    3e+06 Transitions= 2.54e+07 Memory=   718.303        t=   66.3 R=   5e+04
+pan: resizing hashtable to -w22..  done
+Depth=    9420 States=    4e+06 Transitions=  3.3e+07 Memory=   833.311        t=   86.1 R=   5e+04
+Depth=    9420 States=    5e+06 Transitions= 4.08e+07 Memory=   917.295        t=    106 R=   5e+04
+Depth=    9420 States=    6e+06 Transitions= 5.88e+07 Memory=  1001.279        t=    157 R=   4e+04
+Depth=    9420 States=    7e+06 Transitions= 7.01e+07 Memory=  1085.264        t=    187 R=   4e+04
+Depth=    9420 States=    8e+06 Transitions= 8.44e+07 Memory=  1169.151        t=    227 R=   4e+04
+Depth=    9420 States=    9e+06 Transitions= 9.77e+07 Memory=  1253.135        t=    264 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    9420 States=    1e+07 Transitions= 1.11e+08 Memory=  1461.115        t=    302 R=   3e+04
+Depth=    9420 States=  1.1e+07 Transitions= 1.24e+08 Memory=  1545.100        t=    336 R=   3e+04
+Depth=    9420 States=  1.2e+07 Transitions= 1.33e+08 Memory=  1629.084        t=    362 R=   3e+04
+Depth=    9420 States=  1.3e+07 Transitions= 1.46e+08 Memory=  1713.068        t=    397 R=   3e+04
+Depth=    9420 States=  1.4e+07 Transitions= 1.76e+08 Memory=  1797.053        t=    485 R=   3e+04
+Depth=    9420 States=  1.5e+07 Transitions= 1.95e+08 Memory=  1881.037        t=    540 R=   3e+04
+Depth=    9420 States=  1.6e+07 Transitions= 2.12e+08 Memory=  1964.924        t=    587 R=   3e+04
+Depth=    9420 States=  1.7e+07 Transitions= 2.25e+08 Memory=  2048.908        t=    622 R=   3e+04
+Depth=    9420 States=  1.8e+07 Transitions= 2.44e+08 Memory=  2132.893        t=    676 R=   3e+04
+Depth=    9420 States=  1.9e+07 Transitions=  2.6e+08 Memory=  2216.877        t=    721 R=   3e+04
+Depth=    9420 States=    2e+07 Transitions= 2.78e+08 Memory=  2300.861        t=    771 R=   3e+04
+Depth=    9522 States=  2.1e+07 Transitions= 2.91e+08 Memory=  2384.846        t=    808 R=   3e+04
+Depth=    9542 States=  2.2e+07 Transitions= 3.05e+08 Memory=  2468.830        t=    847 R=   3e+04
+Depth=    9542 States=  2.3e+07 Transitions= 3.16e+08 Memory=  2552.717        t=    878 R=   3e+04
+Depth=    9542 States=  2.4e+07 Transitions= 3.28e+08 Memory=  2636.701        t=    910 R=   3e+04
+Depth=    9542 States=  2.5e+07 Transitions= 3.41e+08 Memory=  2720.686        t=    947 R=   3e+04
+Depth=    9542 States=  2.6e+07 Transitions= 3.53e+08 Memory=  2804.670        t=    979 R=   3e+04
+Depth=    9542 States=  2.7e+07 Transitions= 3.66e+08 Memory=  2888.654        t= 1.02e+03 R=   3e+04
+Depth=    9542 States=  2.8e+07 Transitions= 3.79e+08 Memory=  2972.639        t= 1.05e+03 R=   3e+04
+Depth=    9542 States=  2.9e+07 Transitions= 3.92e+08 Memory=  3056.526        t= 1.09e+03 R=   3e+04
+Depth=    9542 States=    3e+07 Transitions= 4.04e+08 Memory=  3140.510        t= 1.12e+03 R=   3e+04
+Depth=    9542 States=  3.1e+07 Transitions= 4.17e+08 Memory=  3224.494        t= 1.16e+03 R=   3e+04
+Depth=    9542 States=  3.2e+07 Transitions= 4.28e+08 Memory=  3308.479        t= 1.19e+03 R=   3e+04
+Depth=    9542 States=  3.3e+07 Transitions= 4.39e+08 Memory=  3392.463        t= 1.22e+03 R=   3e+04
+Depth=    9542 States=  3.4e+07 Transitions= 4.53e+08 Memory=  3476.447        t= 1.26e+03 R=   3e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9542 States=  3.5e+07 Transitions=  4.7e+08 Memory=  4056.416        t= 1.32e+03 R=   3e+04
+Depth=    9542 States=  3.6e+07 Transitions= 4.83e+08 Memory=  4140.401        t= 1.35e+03 R=   3e+04
+Depth=    9542 States=  3.7e+07 Transitions= 4.99e+08 Memory=  4224.385        t= 1.4e+03 R=   3e+04
+Depth=    9542 States=  3.8e+07 Transitions= 5.14e+08 Memory=  4308.369        t= 1.44e+03 R=   3e+04
+Depth=    9542 States=  3.9e+07 Transitions= 5.29e+08 Memory=  4392.354        t= 1.48e+03 R=   3e+04
+Depth=    9542 States=    4e+07 Transitions= 5.39e+08 Memory=  4476.338        t= 1.51e+03 R=   3e+04
+Depth=    9542 States=  4.1e+07 Transitions= 5.49e+08 Memory=  4560.225        t= 1.53e+03 R=   3e+04
+Depth=    9542 States=  4.2e+07 Transitions= 5.61e+08 Memory=  4644.209        t= 1.56e+03 R=   3e+04
+Depth=    9542 States=  4.3e+07 Transitions= 5.88e+08 Memory=  4728.193        t= 1.64e+03 R=   3e+04
+Depth=    9542 States=  4.4e+07 Transitions= 6.14e+08 Memory=  4812.178        t= 1.72e+03 R=   3e+04
+Depth=    9542 States=  4.5e+07 Transitions= 6.31e+08 Memory=  4896.162        t= 1.76e+03 R=   3e+04
+Depth=    9542 States=  4.6e+07 Transitions= 6.41e+08 Memory=  4980.147        t= 1.79e+03 R=   3e+04
+Depth=    9542 States=  4.7e+07 Transitions= 6.57e+08 Memory=  5064.131        t= 1.84e+03 R=   3e+04
+Depth=    9542 States=  4.8e+07 Transitions= 6.76e+08 Memory=  5148.018        t= 1.89e+03 R=   3e+04
+Depth=    9542 States=  4.9e+07 Transitions= 6.93e+08 Memory=  5232.002        t= 1.94e+03 R=   3e+04
+Depth=    9542 States=    5e+07 Transitions= 7.08e+08 Memory=  5315.986        t= 1.98e+03 R=   3e+04
+Depth=    9542 States=  5.1e+07 Transitions= 7.21e+08 Memory=  5399.971        t= 2.02e+03 R=   3e+04
+Depth=    9542 States=  5.2e+07 Transitions= 7.34e+08 Memory=  5483.955        t= 2.05e+03 R=   3e+04
+Depth=    9542 States=  5.3e+07 Transitions= 7.47e+08 Memory=  5567.940        t= 2.09e+03 R=   3e+04
+Depth=    9542 States=  5.4e+07 Transitions= 7.58e+08 Memory=  5651.826        t= 2.12e+03 R=   3e+04
+Depth=    9542 States=  5.5e+07 Transitions= 7.71e+08 Memory=  5735.811        t= 2.15e+03 R=   3e+04
+Depth=    9542 States=  5.6e+07 Transitions= 7.83e+08 Memory=  5819.795        t= 2.19e+03 R=   3e+04
+Depth=    9542 States=  5.7e+07 Transitions= 7.98e+08 Memory=  5903.779        t= 2.23e+03 R=   3e+04
+Depth=    9542 States=  5.8e+07 Transitions= 8.08e+08 Memory=  5987.764        t= 2.25e+03 R=   3e+04
+Depth=    9542 States=  5.9e+07 Transitions= 8.22e+08 Memory=  6071.748        t= 2.29e+03 R=   3e+04
+Depth=    9542 States=    6e+07 Transitions= 8.33e+08 Memory=  6155.733        t= 2.32e+03 R=   3e+04
+Depth=    9542 States=  6.1e+07 Transitions= 8.44e+08 Memory=  6239.619        t= 2.35e+03 R=   3e+04
+Depth=    9542 States=  6.2e+07 Transitions= 8.56e+08 Memory=  6323.604        t= 2.39e+03 R=   3e+04
+Depth=    9542 States=  6.3e+07 Transitions= 8.74e+08 Memory=  6407.588        t= 2.44e+03 R=   3e+04
+Depth=    9542 States=  6.4e+07 Transitions= 8.86e+08 Memory=  6491.572        t= 2.47e+03 R=   3e+04
+Depth=    9542 States=  6.5e+07 Transitions= 9.01e+08 Memory=  6575.557        t= 2.51e+03 R=   3e+04
+Depth=    9542 States=  6.6e+07 Transitions= 9.17e+08 Memory=  6659.541        t= 2.55e+03 R=   3e+04
+Depth=    9542 States=  6.7e+07 Transitions= 9.31e+08 Memory=  6743.428        t= 2.59e+03 R=   3e+04
+Depth=    9542 States=  6.8e+07 Transitions= 9.46e+08 Memory=  6827.412        t= 2.64e+03 R=   3e+04
+Depth=    9542 States=  6.9e+07 Transitions=  9.6e+08 Memory=  6911.397        t= 2.67e+03 R=   3e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 9542, errors: 0
+ 69795266 states, stored
+9.0126381e+08 states, matched
+9.7105908e+08 transitions (= stored+matched)
+1.4943649e+10 atomic steps
+hash conflicts: 5.4028862e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 7721.187      equivalent memory usage for states (stored*(State-vector + overhead))
+ 6011.798      actual memory usage for states (compression: 77.86%)
+               state-vector as stored = 62 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+    3.368      memory lost to fragmentation
+ 6978.193      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 272, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 77, "(1)"
+       line 253, "pan.___", state 85, "(1)"
+       line 257, "pan.___", state 97, "(1)"
+       line 261, "pan.___", state 105, "(1)"
+       line 411, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 195, "(1)"
+       line 257, "pan.___", state 215, "(1)"
+       line 261, "pan.___", state 223, "(1)"
+       line 691, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 313, "(1)"
+       line 257, "pan.___", state 333, "(1)"
+       line 261, "pan.___", state 341, "(1)"
+       line 411, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 424, "(1)"
+       line 257, "pan.___", state 444, "(1)"
+       line 261, "pan.___", state 452, "(1)"
+       line 411, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 475, "(1)"
+       line 411, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 476, "else"
+       line 411, "pan.___", state 479, "(1)"
+       line 415, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 489, "(1)"
+       line 415, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 490, "else"
+       line 415, "pan.___", state 493, "(1)"
+       line 415, "pan.___", state 494, "(1)"
+       line 415, "pan.___", state 494, "(1)"
+       line 413, "pan.___", state 499, "((i<1))"
+       line 413, "pan.___", state 499, "((i>=1))"
+       line 420, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 507, "(1)"
+       line 420, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 508, "else"
+       line 420, "pan.___", state 511, "(1)"
+       line 420, "pan.___", state 512, "(1)"
+       line 420, "pan.___", state 512, "(1)"
+       line 424, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 521, "(1)"
+       line 424, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 522, "else"
+       line 424, "pan.___", state 525, "(1)"
+       line 424, "pan.___", state 526, "(1)"
+       line 424, "pan.___", state 526, "(1)"
+       line 422, "pan.___", state 531, "((i<2))"
+       line 422, "pan.___", state 531, "((i>=2))"
+       line 249, "pan.___", state 537, "(1)"
+       line 253, "pan.___", state 545, "(1)"
+       line 253, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 546, "else"
+       line 251, "pan.___", state 551, "((i<1))"
+       line 251, "pan.___", state 551, "((i>=1))"
+       line 257, "pan.___", state 557, "(1)"
+       line 257, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 558, "else"
+       line 261, "pan.___", state 565, "(1)"
+       line 261, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 566, "else"
+       line 259, "pan.___", state 571, "((i<2))"
+       line 259, "pan.___", state 571, "((i>=2))"
+       line 266, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 575, "else"
+       line 431, "pan.___", state 577, "(1)"
+       line 431, "pan.___", state 577, "(1)"
+       line 691, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 691, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 691, "pan.___", state 582, "(1)"
+       line 411, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 653, "(1)"
+       line 257, "pan.___", state 673, "(1)"
+       line 261, "pan.___", state 681, "(1)"
+       line 411, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 771, "(1)"
+       line 257, "pan.___", state 791, "(1)"
+       line 261, "pan.___", state 799, "(1)"
+       line 411, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 820, "(1)"
+       line 411, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 821, "else"
+       line 411, "pan.___", state 824, "(1)"
+       line 415, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 834, "(1)"
+       line 415, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 835, "else"
+       line 415, "pan.___", state 838, "(1)"
+       line 415, "pan.___", state 839, "(1)"
+       line 415, "pan.___", state 839, "(1)"
+       line 413, "pan.___", state 844, "((i<1))"
+       line 413, "pan.___", state 844, "((i>=1))"
+       line 420, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 852, "(1)"
+       line 420, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 853, "else"
+       line 420, "pan.___", state 856, "(1)"
+       line 420, "pan.___", state 857, "(1)"
+       line 420, "pan.___", state 857, "(1)"
+       line 424, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 866, "(1)"
+       line 424, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 867, "else"
+       line 424, "pan.___", state 870, "(1)"
+       line 424, "pan.___", state 871, "(1)"
+       line 424, "pan.___", state 871, "(1)"
+       line 422, "pan.___", state 876, "((i<2))"
+       line 422, "pan.___", state 876, "((i>=2))"
+       line 249, "pan.___", state 882, "(1)"
+       line 253, "pan.___", state 890, "(1)"
+       line 253, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 891, "else"
+       line 251, "pan.___", state 896, "((i<1))"
+       line 251, "pan.___", state 896, "((i>=1))"
+       line 257, "pan.___", state 902, "(1)"
+       line 257, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 903, "else"
+       line 261, "pan.___", state 910, "(1)"
+       line 261, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 911, "else"
+       line 259, "pan.___", state 916, "((i<2))"
+       line 259, "pan.___", state 916, "((i>=2))"
+       line 266, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 920, "else"
+       line 431, "pan.___", state 922, "(1)"
+       line 431, "pan.___", state 922, "(1)"
+       line 699, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 995, "(1)"
+       line 257, "pan.___", state 1015, "(1)"
+       line 261, "pan.___", state 1023, "(1)"
+       line 411, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1109, "(1)"
+       line 257, "pan.___", state 1129, "(1)"
+       line 261, "pan.___", state 1137, "(1)"
+       line 411, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1224, "(1)"
+       line 257, "pan.___", state 1244, "(1)"
+       line 261, "pan.___", state 1252, "(1)"
+       line 411, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1335, "(1)"
+       line 257, "pan.___", state 1355, "(1)"
+       line 261, "pan.___", state 1363, "(1)"
+       line 411, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1451, "(1)"
+       line 257, "pan.___", state 1471, "(1)"
+       line 261, "pan.___", state 1479, "(1)"
+       line 411, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1562, "(1)"
+       line 257, "pan.___", state 1582, "(1)"
+       line 261, "pan.___", state 1590, "(1)"
+       line 411, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1676, "(1)"
+       line 257, "pan.___", state 1696, "(1)"
+       line 261, "pan.___", state 1704, "(1)"
+       line 738, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1794, "(1)"
+       line 257, "pan.___", state 1814, "(1)"
+       line 261, "pan.___", state 1822, "(1)"
+       line 411, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1905, "(1)"
+       line 257, "pan.___", state 1925, "(1)"
+       line 261, "pan.___", state 1933, "(1)"
+       line 411, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1956, "(1)"
+       line 411, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 1957, "else"
+       line 411, "pan.___", state 1960, "(1)"
+       line 415, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1970, "(1)"
+       line 415, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 1971, "else"
+       line 415, "pan.___", state 1974, "(1)"
+       line 415, "pan.___", state 1975, "(1)"
+       line 415, "pan.___", state 1975, "(1)"
+       line 413, "pan.___", state 1980, "((i<1))"
+       line 413, "pan.___", state 1980, "((i>=1))"
+       line 420, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1988, "(1)"
+       line 420, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 1989, "else"
+       line 420, "pan.___", state 1992, "(1)"
+       line 420, "pan.___", state 1993, "(1)"
+       line 420, "pan.___", state 1993, "(1)"
+       line 424, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2002, "(1)"
+       line 424, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2003, "else"
+       line 424, "pan.___", state 2006, "(1)"
+       line 424, "pan.___", state 2007, "(1)"
+       line 424, "pan.___", state 2007, "(1)"
+       line 422, "pan.___", state 2012, "((i<2))"
+       line 422, "pan.___", state 2012, "((i>=2))"
+       line 249, "pan.___", state 2018, "(1)"
+       line 253, "pan.___", state 2026, "(1)"
+       line 253, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2027, "else"
+       line 251, "pan.___", state 2032, "((i<1))"
+       line 251, "pan.___", state 2032, "((i>=1))"
+       line 257, "pan.___", state 2038, "(1)"
+       line 257, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2039, "else"
+       line 261, "pan.___", state 2046, "(1)"
+       line 261, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2047, "else"
+       line 259, "pan.___", state 2052, "((i<2))"
+       line 259, "pan.___", state 2052, "((i>=2))"
+       line 266, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2056, "else"
+       line 431, "pan.___", state 2058, "(1)"
+       line 431, "pan.___", state 2058, "(1)"
+       line 738, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 738, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 738, "pan.___", state 2063, "(1)"
+       line 411, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2134, "(1)"
+       line 257, "pan.___", state 2154, "(1)"
+       line 261, "pan.___", state 2162, "(1)"
+       line 411, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2251, "(1)"
+       line 257, "pan.___", state 2271, "(1)"
+       line 261, "pan.___", state 2279, "(1)"
+       line 411, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2362, "(1)"
+       line 257, "pan.___", state 2382, "(1)"
+       line 261, "pan.___", state 2390, "(1)"
+       line 411, "pan.___", state 2421, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2453, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2467, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2485, "(1)"
+       line 257, "pan.___", state 2505, "(1)"
+       line 261, "pan.___", state 2513, "(1)"
+       line 411, "pan.___", state 2530, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2562, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2576, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2594, "(1)"
+       line 257, "pan.___", state 2614, "(1)"
+       line 261, "pan.___", state 2622, "(1)"
+       line 898, "pan.___", state 2641, "-end-"
+       (227 of 2641 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 82, "(1)"
+       line 253, "pan.___", state 90, "(1)"
+       line 257, "pan.___", state 102, "(1)"
+       line 272, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 257, "(1)"
+       line 253, "pan.___", state 265, "(1)"
+       line 257, "pan.___", state 277, "(1)"
+       line 261, "pan.___", state 285, "(1)"
+       line 415, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 378, "(1)"
+       line 257, "pan.___", state 390, "(1)"
+       line 261, "pan.___", state 398, "(1)"
+       line 415, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 499, "(1)"
+       line 257, "pan.___", state 511, "(1)"
+       line 261, "pan.___", state 519, "(1)"
+       line 415, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 610, "(1)"
+       line 257, "pan.___", state 622, "(1)"
+       line 261, "pan.___", state 630, "(1)"
+       line 415, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 723, "(1)"
+       line 257, "pan.___", state 735, "(1)"
+       line 261, "pan.___", state 743, "(1)"
+       line 272, "pan.___", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 820, "(1)"
+       line 284, "pan.___", state 827, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 843, "(1)"
+       line 253, "pan.___", state 851, "(1)"
+       line 257, "pan.___", state 863, "(1)"
+       line 261, "pan.___", state 871, "(1)"
+       line 272, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 911, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 924, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 933, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 949, "(1)"
+       line 253, "pan.___", state 957, "(1)"
+       line 257, "pan.___", state 969, "(1)"
+       line 261, "pan.___", state 977, "(1)"
+       line 276, "pan.___", state 1003, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1016, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1025, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1041, "(1)"
+       line 253, "pan.___", state 1049, "(1)"
+       line 257, "pan.___", state 1061, "(1)"
+       line 261, "pan.___", state 1069, "(1)"
+       line 272, "pan.___", state 1100, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1109, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1122, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1131, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1147, "(1)"
+       line 253, "pan.___", state 1155, "(1)"
+       line 257, "pan.___", state 1167, "(1)"
+       line 261, "pan.___", state 1175, "(1)"
+       line 276, "pan.___", state 1201, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1214, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1239, "(1)"
+       line 253, "pan.___", state 1247, "(1)"
+       line 257, "pan.___", state 1259, "(1)"
+       line 261, "pan.___", state 1267, "(1)"
+       line 272, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1307, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1320, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1329, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1345, "(1)"
+       line 253, "pan.___", state 1353, "(1)"
+       line 257, "pan.___", state 1365, "(1)"
+       line 261, "pan.___", state 1373, "(1)"
+       line 276, "pan.___", state 1399, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1412, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1421, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1437, "(1)"
+       line 253, "pan.___", state 1445, "(1)"
+       line 257, "pan.___", state 1457, "(1)"
+       line 261, "pan.___", state 1465, "(1)"
+       line 272, "pan.___", state 1496, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1505, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1518, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1543, "(1)"
+       line 253, "pan.___", state 1551, "(1)"
+       line 257, "pan.___", state 1563, "(1)"
+       line 261, "pan.___", state 1571, "(1)"
+       line 1237, "pan.___", state 1587, "-end-"
+       (103 of 1587 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 2.7e+03 seconds
+pan: rate 25814.341 states/second
+pan: avg transition delay 2.7843e-06 usec
+cp .input.spin urcu_free_no_rmb.spin.input
+cp .input.spin.trail urcu_free_no_rmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.spin.input b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_rmb.spin.input
new file mode 100644 (file)
index 0000000..80445f6
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_RMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
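+
+/*
+ * Minimal usage sketch of the token macros above (the token names READ_A and
+ * WRITE_B are hypothetical, not taken from this model): an instruction that
+ * consumes the result of a prior read of A and produces B would be guarded as
+ *
+ *   :: CONSUME_TOKENS(proc_state, READ_A, WRITE_B) ->
+ *          ... perform the write to B ...
+ *          PRODUCE_TOKENS(proc_state, WRITE_B);
+ *
+ * Listing the instruction's own output token in the "notbits" argument keeps
+ * it from executing twice.
+ */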
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it remains when the same OOO memory
+ * model variable must be written multiple times.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Another classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
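+
+/*
+ * Small illustrative example of the dependency types above (generic
+ * pseudo-statements, not taken from this model) :
+ *
+ *     a = x;          (S1)
+ *     y = a + 1;      (S2) RAW on S1 : reads "a", which S1 wrote
+ *     a = z;          (S3) WAR on S2 : writes "a", which S2 read
+ *                          WAW on S1 : writes "a", which S1 wrote
+ */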
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May write a dirty cache entry back to memory (making it visible to other
+ * caches), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
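+
+/*
+ * Usage sketch of the cached variable macros (the variable "foo" is
+ * hypothetical and not declared in this model; "j" is the loop counter
+ * required by INIT_CACHED_VAR) :
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *     INIT_CACHED_VAR(foo, 0, j);            (inside the init atomic block)
+ *     WRITE_CACHED_VAR(foo, 1);              (update this CPU's cache, mark dirty)
+ *     tmp = READ_CACHED_VAR(foo);            (read this CPU's cached copy)
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());    (commit the dirty entry to memory)
+ */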
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
+ * because they would add core synchronization that does not exist and would
+ * therefore create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * while waiting for the reader and sending barrier requests, with
+                * the reader always servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because avoiding it would add a branch, whose performance
+                        * impact in the common case does not justify the removal.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. Reader and writer progress have to be tested separately;
+                * otherwise we could believe the writer is making progress when it
+                * is in fact blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.log b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.log
new file mode 100644 (file)
index 0000000..319fbe9
--- /dev/null
@@ -0,0 +1,530 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_wmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    9172 States=    1e+06 Transitions= 6.87e+06 Memory=   550.432        t=     17 R=   6e+04
+Depth=    9172 States=    2e+06 Transitions= 1.47e+07 Memory=   634.318        t=   37.6 R=   5e+04
+Depth=    9172 States=    3e+06 Transitions= 2.46e+07 Memory=   718.303        t=   64.2 R=   5e+04
+pan: resizing hashtable to -w22..  done
+Depth=    9172 States=    4e+06 Transitions= 3.19e+07 Memory=   833.311        t=   83.2 R=   5e+04
+Depth=    9172 States=    5e+06 Transitions= 3.95e+07 Memory=   917.295        t=    103 R=   5e+04
+Depth=    9172 States=    6e+06 Transitions= 5.71e+07 Memory=  1001.279        t=    152 R=   4e+04
+Depth=    9172 States=    7e+06 Transitions= 6.81e+07 Memory=  1085.264        t=    182 R=   4e+04
+Depth=    9172 States=    8e+06 Transitions= 8.22e+07 Memory=  1169.151        t=    221 R=   4e+04
+Depth=    9172 States=    9e+06 Transitions= 9.54e+07 Memory=  1253.135        t=    258 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    9172 States=    1e+07 Transitions= 1.08e+08 Memory=  1461.115        t=    295 R=   3e+04
+Depth=    9172 States=  1.1e+07 Transitions=  1.2e+08 Memory=  1545.100        t=    327 R=   3e+04
+Depth=    9172 States=  1.2e+07 Transitions= 1.27e+08 Memory=  1629.084        t=    344 R=   3e+04
+Depth=    9172 States=  1.3e+07 Transitions= 1.35e+08 Memory=  1713.068        t=    365 R=   4e+04
+Depth=    9172 States=  1.4e+07 Transitions= 1.46e+08 Memory=  1797.053        t=    394 R=   4e+04
+Depth=    9172 States=  1.5e+07 Transitions= 1.53e+08 Memory=  1881.037        t=    413 R=   4e+04
+Depth=    9172 States=  1.6e+07 Transitions=  1.6e+08 Memory=  1964.924        t=    432 R=   4e+04
+Depth=    9172 States=  1.7e+07 Transitions= 1.76e+08 Memory=  2048.908        t=    475 R=   4e+04
+Depth=    9172 States=  1.8e+07 Transitions=  1.9e+08 Memory=  2132.893        t=    515 R=   3e+04
+Depth=    9172 States=  1.9e+07 Transitions= 2.02e+08 Memory=  2216.877        t=    548 R=   3e+04
+Depth=    9172 States=    2e+07 Transitions= 2.15e+08 Memory=  2300.861        t=    585 R=   3e+04
+Depth=    9172 States=  2.1e+07 Transitions= 2.25e+08 Memory=  2384.846        t=    610 R=   3e+04
+Depth=    9172 States=  2.2e+07 Transitions= 2.38e+08 Memory=  2468.830        t=    646 R=   3e+04
+Depth=    9172 States=  2.3e+07 Transitions= 2.67e+08 Memory=  2552.717        t=    732 R=   3e+04
+Depth=    9172 States=  2.4e+07 Transitions= 2.86e+08 Memory=  2636.701        t=    785 R=   3e+04
+Depth=    9172 States=  2.5e+07 Transitions= 3.03e+08 Memory=  2720.686        t=    832 R=   3e+04
+Depth=    9172 States=  2.6e+07 Transitions= 3.12e+08 Memory=  2804.670        t=    857 R=   3e+04
+Depth=    9172 States=  2.7e+07 Transitions= 3.27e+08 Memory=  2888.654        t=    899 R=   3e+04
+Depth=    9172 States=  2.8e+07 Transitions= 3.57e+08 Memory=  2972.639        t=    987 R=   3e+04
+Depth=    9172 States=  2.9e+07 Transitions= 3.73e+08 Memory=  3056.526        t= 1.03e+03 R=   3e+04
+Depth=    9172 States=    3e+07 Transitions= 3.86e+08 Memory=  3140.510        t= 1.07e+03 R=   3e+04
+Depth=    9172 States=  3.1e+07 Transitions= 4.03e+08 Memory=  3224.494        t= 1.12e+03 R=   3e+04
+Depth=    9172 States=  3.2e+07 Transitions= 4.22e+08 Memory=  3308.479        t= 1.17e+03 R=   3e+04
+Depth=    9172 States=  3.3e+07 Transitions= 4.39e+08 Memory=  3392.463        t= 1.22e+03 R=   3e+04
+Depth=    9172 States=  3.4e+07 Transitions= 4.56e+08 Memory=  3476.447        t= 1.27e+03 R=   3e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9234 States=  3.5e+07 Transitions= 4.73e+08 Memory=  4056.416        t= 1.33e+03 R=   3e+04
+Depth=    9234 States=  3.6e+07 Transitions= 4.85e+08 Memory=  4140.401        t= 1.36e+03 R=   3e+04
+Depth=    9273 States=  3.7e+07 Transitions=    5e+08 Memory=  4224.385        t= 1.4e+03 R=   3e+04
+Depth=    9273 States=  3.8e+07 Transitions= 5.14e+08 Memory=  4308.369        t= 1.44e+03 R=   3e+04
+Depth=    9273 States=  3.9e+07 Transitions= 5.26e+08 Memory=  4392.354        t= 1.47e+03 R=   3e+04
+Depth=    9273 States=    4e+07 Transitions= 5.39e+08 Memory=  4476.338        t= 1.51e+03 R=   3e+04
+Depth=    9273 States=  4.1e+07 Transitions=  5.5e+08 Memory=  4560.322        t= 1.54e+03 R=   3e+04
+Depth=    9273 States=  4.2e+07 Transitions= 5.64e+08 Memory=  4644.209        t= 1.58e+03 R=   3e+04
+Depth=    9273 States=  4.3e+07 Transitions= 5.75e+08 Memory=  4728.193        t= 1.61e+03 R=   3e+04
+Depth=    9273 States=  4.4e+07 Transitions= 5.86e+08 Memory=  4812.178        t= 1.64e+03 R=   3e+04
+Depth=    9273 States=  4.5e+07 Transitions= 6.01e+08 Memory=  4896.162        t= 1.68e+03 R=   3e+04
+Depth=    9273 States=  4.6e+07 Transitions= 6.16e+08 Memory=  4980.147        t= 1.72e+03 R=   3e+04
+Depth=    9273 States=  4.7e+07 Transitions= 6.29e+08 Memory=  5064.131        t= 1.76e+03 R=   3e+04
+Depth=    9273 States=  4.8e+07 Transitions=  6.4e+08 Memory=  5148.018        t= 1.79e+03 R=   3e+04
+Depth=    9273 States=  4.9e+07 Transitions= 6.51e+08 Memory=  5232.002        t= 1.82e+03 R=   3e+04
+Depth=    9273 States=    5e+07 Transitions= 6.66e+08 Memory=  5315.986        t= 1.86e+03 R=   3e+04
+Depth=    9273 States=  5.1e+07 Transitions=  6.8e+08 Memory=  5399.971        t= 1.9e+03 R=   3e+04
+Depth=    9273 States=  5.2e+07 Transitions=  6.9e+08 Memory=  5483.955        t= 1.93e+03 R=   3e+04
+Depth=    9273 States=  5.3e+07 Transitions= 7.01e+08 Memory=  5567.940        t= 1.96e+03 R=   3e+04
+Depth=    9273 States=  5.4e+07 Transitions= 7.14e+08 Memory=  5651.826        t= 1.99e+03 R=   3e+04
+Depth=    9273 States=  5.5e+07 Transitions= 7.26e+08 Memory=  5735.811        t= 2.02e+03 R=   3e+04
+Depth=    9273 States=  5.6e+07 Transitions= 7.44e+08 Memory=  5819.795        t= 2.07e+03 R=   3e+04
+Depth=    9273 States=  5.7e+07 Transitions= 7.57e+08 Memory=  5903.779        t= 2.11e+03 R=   3e+04
+Depth=    9273 States=  5.8e+07 Transitions= 7.72e+08 Memory=  5987.764        t= 2.15e+03 R=   3e+04
+Depth=    9273 States=  5.9e+07 Transitions= 7.87e+08 Memory=  6071.748        t= 2.19e+03 R=   3e+04
+Depth=    9273 States=    6e+07 Transitions= 8.03e+08 Memory=  6155.733        t= 2.24e+03 R=   3e+04
+Depth=    9273 States=  6.1e+07 Transitions= 8.18e+08 Memory=  6239.619        t= 2.28e+03 R=   3e+04
+Depth=    9273 States=  6.2e+07 Transitions= 8.31e+08 Memory=  6323.604        t= 2.31e+03 R=   3e+04
+Depth=    9273 States=  6.3e+07 Transitions= 8.38e+08 Memory=  6407.588        t= 2.33e+03 R=   3e+04
+Depth=    9273 States=  6.4e+07 Transitions= 8.46e+08 Memory=  6491.572        t= 2.35e+03 R=   3e+04
+Depth=    9273 States=  6.5e+07 Transitions= 8.56e+08 Memory=  6575.557        t= 2.38e+03 R=   3e+04
+Depth=    9273 States=  6.6e+07 Transitions= 8.63e+08 Memory=  6659.541        t= 2.4e+03 R=   3e+04
+Depth=    9273 States=  6.7e+07 Transitions=  8.7e+08 Memory=  6743.428        t= 2.42e+03 R=   3e+04
+Depth=    9273 States=  6.8e+07 Transitions= 8.88e+08 Memory=  6827.412        t= 2.47e+03 R=   3e+04
+Depth=    9273 States=  6.9e+07 Transitions=    9e+08 Memory=  6911.397        t= 2.5e+03 R=   3e+04
+pan: claim violated! (at depth 1431)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 9273, errors: 1
+ 69874699 states, stored
+8.4222224e+08 states, matched
+9.1209694e+08 transitions (= stored+matched)
+1.3919028e+10 atomic steps
+hash conflicts: 5.0632776e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 7729.974      equivalent memory usage for states (stored*(State-vector + overhead))
+ 6018.458      actual memory usage for states (compression: 77.86%)
+               state-vector as stored = 62 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+    3.388      memory lost to fragmentation
+ 6984.834      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 272, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 77, "(1)"
+       line 253, "pan.___", state 85, "(1)"
+       line 257, "pan.___", state 97, "(1)"
+       line 261, "pan.___", state 105, "(1)"
+       line 411, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 195, "(1)"
+       line 257, "pan.___", state 215, "(1)"
+       line 261, "pan.___", state 223, "(1)"
+       line 691, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 313, "(1)"
+       line 257, "pan.___", state 333, "(1)"
+       line 261, "pan.___", state 341, "(1)"
+       line 411, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 424, "(1)"
+       line 257, "pan.___", state 444, "(1)"
+       line 261, "pan.___", state 452, "(1)"
+       line 411, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 475, "(1)"
+       line 411, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 476, "else"
+       line 411, "pan.___", state 479, "(1)"
+       line 415, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 489, "(1)"
+       line 415, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 490, "else"
+       line 415, "pan.___", state 493, "(1)"
+       line 415, "pan.___", state 494, "(1)"
+       line 415, "pan.___", state 494, "(1)"
+       line 413, "pan.___", state 499, "((i<1))"
+       line 413, "pan.___", state 499, "((i>=1))"
+       line 420, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 507, "(1)"
+       line 420, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 508, "else"
+       line 420, "pan.___", state 511, "(1)"
+       line 420, "pan.___", state 512, "(1)"
+       line 420, "pan.___", state 512, "(1)"
+       line 424, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 521, "(1)"
+       line 424, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 522, "else"
+       line 424, "pan.___", state 525, "(1)"
+       line 424, "pan.___", state 526, "(1)"
+       line 424, "pan.___", state 526, "(1)"
+       line 422, "pan.___", state 531, "((i<2))"
+       line 422, "pan.___", state 531, "((i>=2))"
+       line 249, "pan.___", state 537, "(1)"
+       line 253, "pan.___", state 545, "(1)"
+       line 253, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 546, "else"
+       line 251, "pan.___", state 551, "((i<1))"
+       line 251, "pan.___", state 551, "((i>=1))"
+       line 257, "pan.___", state 557, "(1)"
+       line 257, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 558, "else"
+       line 261, "pan.___", state 565, "(1)"
+       line 261, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 566, "else"
+       line 259, "pan.___", state 571, "((i<2))"
+       line 259, "pan.___", state 571, "((i>=2))"
+       line 266, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 575, "else"
+       line 431, "pan.___", state 577, "(1)"
+       line 431, "pan.___", state 577, "(1)"
+       line 691, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 691, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 691, "pan.___", state 582, "(1)"
+       line 411, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 653, "(1)"
+       line 257, "pan.___", state 673, "(1)"
+       line 261, "pan.___", state 681, "(1)"
+       line 411, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 771, "(1)"
+       line 257, "pan.___", state 791, "(1)"
+       line 261, "pan.___", state 799, "(1)"
+       line 411, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 820, "(1)"
+       line 411, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 821, "else"
+       line 411, "pan.___", state 824, "(1)"
+       line 415, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 834, "(1)"
+       line 415, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 835, "else"
+       line 415, "pan.___", state 838, "(1)"
+       line 415, "pan.___", state 839, "(1)"
+       line 415, "pan.___", state 839, "(1)"
+       line 413, "pan.___", state 844, "((i<1))"
+       line 413, "pan.___", state 844, "((i>=1))"
+       line 420, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 852, "(1)"
+       line 420, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 853, "else"
+       line 420, "pan.___", state 856, "(1)"
+       line 420, "pan.___", state 857, "(1)"
+       line 420, "pan.___", state 857, "(1)"
+       line 424, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 866, "(1)"
+       line 424, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 867, "else"
+       line 424, "pan.___", state 870, "(1)"
+       line 424, "pan.___", state 871, "(1)"
+       line 424, "pan.___", state 871, "(1)"
+       line 422, "pan.___", state 876, "((i<2))"
+       line 422, "pan.___", state 876, "((i>=2))"
+       line 249, "pan.___", state 882, "(1)"
+       line 253, "pan.___", state 890, "(1)"
+       line 253, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 891, "else"
+       line 251, "pan.___", state 896, "((i<1))"
+       line 251, "pan.___", state 896, "((i>=1))"
+       line 257, "pan.___", state 902, "(1)"
+       line 257, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 903, "else"
+       line 261, "pan.___", state 910, "(1)"
+       line 261, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 911, "else"
+       line 259, "pan.___", state 916, "((i<2))"
+       line 259, "pan.___", state 916, "((i>=2))"
+       line 266, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 920, "else"
+       line 431, "pan.___", state 922, "(1)"
+       line 431, "pan.___", state 922, "(1)"
+       line 699, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 995, "(1)"
+       line 257, "pan.___", state 1015, "(1)"
+       line 261, "pan.___", state 1023, "(1)"
+       line 411, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1109, "(1)"
+       line 257, "pan.___", state 1129, "(1)"
+       line 261, "pan.___", state 1137, "(1)"
+       line 411, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1224, "(1)"
+       line 257, "pan.___", state 1244, "(1)"
+       line 261, "pan.___", state 1252, "(1)"
+       line 411, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1335, "(1)"
+       line 257, "pan.___", state 1355, "(1)"
+       line 261, "pan.___", state 1363, "(1)"
+       line 411, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1451, "(1)"
+       line 257, "pan.___", state 1471, "(1)"
+       line 261, "pan.___", state 1479, "(1)"
+       line 411, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1562, "(1)"
+       line 257, "pan.___", state 1582, "(1)"
+       line 261, "pan.___", state 1590, "(1)"
+       line 411, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1676, "(1)"
+       line 257, "pan.___", state 1696, "(1)"
+       line 261, "pan.___", state 1704, "(1)"
+       line 738, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1794, "(1)"
+       line 257, "pan.___", state 1814, "(1)"
+       line 261, "pan.___", state 1822, "(1)"
+       line 411, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1905, "(1)"
+       line 257, "pan.___", state 1925, "(1)"
+       line 261, "pan.___", state 1933, "(1)"
+       line 411, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1956, "(1)"
+       line 411, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 1957, "else"
+       line 411, "pan.___", state 1960, "(1)"
+       line 415, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1970, "(1)"
+       line 415, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 1971, "else"
+       line 415, "pan.___", state 1974, "(1)"
+       line 415, "pan.___", state 1975, "(1)"
+       line 415, "pan.___", state 1975, "(1)"
+       line 413, "pan.___", state 1980, "((i<1))"
+       line 413, "pan.___", state 1980, "((i>=1))"
+       line 420, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1988, "(1)"
+       line 420, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 1989, "else"
+       line 420, "pan.___", state 1992, "(1)"
+       line 420, "pan.___", state 1993, "(1)"
+       line 420, "pan.___", state 1993, "(1)"
+       line 424, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2002, "(1)"
+       line 424, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2003, "else"
+       line 424, "pan.___", state 2006, "(1)"
+       line 424, "pan.___", state 2007, "(1)"
+       line 424, "pan.___", state 2007, "(1)"
+       line 422, "pan.___", state 2012, "((i<2))"
+       line 422, "pan.___", state 2012, "((i>=2))"
+       line 249, "pan.___", state 2018, "(1)"
+       line 253, "pan.___", state 2026, "(1)"
+       line 253, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2027, "else"
+       line 251, "pan.___", state 2032, "((i<1))"
+       line 251, "pan.___", state 2032, "((i>=1))"
+       line 257, "pan.___", state 2038, "(1)"
+       line 257, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2039, "else"
+       line 261, "pan.___", state 2046, "(1)"
+       line 261, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2047, "else"
+       line 259, "pan.___", state 2052, "((i<2))"
+       line 259, "pan.___", state 2052, "((i>=2))"
+       line 266, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2056, "else"
+       line 431, "pan.___", state 2058, "(1)"
+       line 431, "pan.___", state 2058, "(1)"
+       line 738, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 738, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 738, "pan.___", state 2063, "(1)"
+       line 411, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2134, "(1)"
+       line 257, "pan.___", state 2154, "(1)"
+       line 261, "pan.___", state 2162, "(1)"
+       line 411, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2251, "(1)"
+       line 257, "pan.___", state 2271, "(1)"
+       line 261, "pan.___", state 2279, "(1)"
+       line 411, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2362, "(1)"
+       line 257, "pan.___", state 2382, "(1)"
+       line 261, "pan.___", state 2390, "(1)"
+       line 249, "pan.___", state 2421, "(1)"
+       line 257, "pan.___", state 2441, "(1)"
+       line 261, "pan.___", state 2449, "(1)"
+       line 249, "pan.___", state 2464, "(1)"
+       line 257, "pan.___", state 2484, "(1)"
+       line 261, "pan.___", state 2492, "(1)"
+       line 898, "pan.___", state 2509, "-end-"
+       (221 of 2509 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 19, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 33, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 83, "(1)"
+       line 253, "pan.___", state 91, "(1)"
+       line 272, "pan.___", state 132, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 134, "(1)"
+       line 276, "pan.___", state 141, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 143, "(1)"
+       line 276, "pan.___", state 144, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 144, "else"
+       line 274, "pan.___", state 149, "((i<1))"
+       line 274, "pan.___", state 149, "((i>=1))"
+       line 280, "pan.___", state 154, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 156, "(1)"
+       line 280, "pan.___", state 157, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 157, "else"
+       line 284, "pan.___", state 163, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 165, "(1)"
+       line 284, "pan.___", state 166, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 166, "else"
+       line 289, "pan.___", state 175, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 289, "pan.___", state 175, "else"
+       line 411, "pan.___", state 194, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 208, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 226, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 240, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 258, "(1)"
+       line 253, "pan.___", state 266, "(1)"
+       line 257, "pan.___", state 278, "(1)"
+       line 261, "pan.___", state 286, "(1)"
+       line 415, "pan.___", state 321, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 339, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 353, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 379, "(1)"
+       line 257, "pan.___", state 391, "(1)"
+       line 261, "pan.___", state 399, "(1)"
+       line 415, "pan.___", state 442, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 460, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 474, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 500, "(1)"
+       line 257, "pan.___", state 512, "(1)"
+       line 261, "pan.___", state 520, "(1)"
+       line 415, "pan.___", state 553, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 571, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 585, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 611, "(1)"
+       line 257, "pan.___", state 623, "(1)"
+       line 261, "pan.___", state 631, "(1)"
+       line 415, "pan.___", state 666, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 684, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 698, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 724, "(1)"
+       line 257, "pan.___", state 736, "(1)"
+       line 261, "pan.___", state 744, "(1)"
+       line 272, "pan.___", state 797, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 806, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 844, "(1)"
+       line 253, "pan.___", state 852, "(1)"
+       line 257, "pan.___", state 864, "(1)"
+       line 261, "pan.___", state 872, "(1)"
+       line 272, "pan.___", state 903, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 912, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 925, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 934, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 950, "(1)"
+       line 253, "pan.___", state 958, "(1)"
+       line 257, "pan.___", state 970, "(1)"
+       line 261, "pan.___", state 978, "(1)"
+       line 276, "pan.___", state 1004, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1017, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1026, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1042, "(1)"
+       line 253, "pan.___", state 1050, "(1)"
+       line 257, "pan.___", state 1062, "(1)"
+       line 261, "pan.___", state 1070, "(1)"
+       line 272, "pan.___", state 1101, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1110, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1123, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1132, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1148, "(1)"
+       line 253, "pan.___", state 1156, "(1)"
+       line 257, "pan.___", state 1168, "(1)"
+       line 261, "pan.___", state 1176, "(1)"
+       line 276, "pan.___", state 1202, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1215, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1224, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1240, "(1)"
+       line 253, "pan.___", state 1248, "(1)"
+       line 257, "pan.___", state 1260, "(1)"
+       line 261, "pan.___", state 1268, "(1)"
+       line 272, "pan.___", state 1299, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1308, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1321, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1330, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1346, "(1)"
+       line 253, "pan.___", state 1354, "(1)"
+       line 257, "pan.___", state 1366, "(1)"
+       line 261, "pan.___", state 1374, "(1)"
+       line 276, "pan.___", state 1400, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1413, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1422, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1438, "(1)"
+       line 253, "pan.___", state 1446, "(1)"
+       line 257, "pan.___", state 1458, "(1)"
+       line 261, "pan.___", state 1466, "(1)"
+       line 272, "pan.___", state 1497, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1506, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1519, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1528, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1544, "(1)"
+       line 253, "pan.___", state 1552, "(1)"
+       line 257, "pan.___", state 1564, "(1)"
+       line 261, "pan.___", state 1572, "(1)"
+       line 1237, "pan.___", state 1588, "-end-"
+       (109 of 1588 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 2.53e+03 seconds
+pan: rate 27617.257 states/second
+pan: avg transition delay 2.7739e-06 usec
+cp .input.spin urcu_free_no_wmb.spin.input
+cp .input.spin.trail urcu_free_no_wmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.spin.input b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.spin.input
new file mode 100644 (file)
index 0000000..511c963
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_WMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
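+
+/*
+ * For illustration (token names are hypothetical): an instruction guarded by
+ * this scheme typically takes the form
+ *
+ *   :: CONSUME_TOKENS(proc_urcu_reader, TOK_INPUTS, TOK_SELF) ->
+ *           ooo_mem(i);
+ *           tmp = READ_CACHED_VAR(urcu_gp_ctr);
+ *           PRODUCE_TOKENS(proc_urcu_reader, TOK_SELF);
+ *
+ * i.e. it fires once its input tokens are present and its own token is not,
+ * then marks itself as executed by producing its own token.
+ */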
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
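+
+/*
+ * For illustration, consider three statements operating on the same variable:
+ *
+ *   S1: a = b + 1;    S2: c = a;    S3: a = d;
+ *
+ * S2 has a RAW (true) dependency on S1 (it reads the a written by S1), S3 has
+ * a WAR dependency on S2 (it overwrites the a that S2 reads), and S3 has a
+ * WAW dependency on S1 (both write a).
+ */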
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate dirty cache entries to memory (and thus to other
+ * caches).
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
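+
+/*
+ * Putting the cache model together ("foo" is a hypothetical variable name):
+ * DECLARE_CACHED_VAR(byte, foo) yields the memory copy mem_foo, the
+ * per-process copies cached_foo.val[0..NR_PROCS-1] and the dirty bitfield
+ * cache_dirty_foo. WRITE_CACHED_VAR(foo, v) only updates the writer's cached
+ * copy and marks it dirty; the value reaches mem_foo (and thus other
+ * processes) only when CACHE_WRITE_TO_MEM flushes it, either from a memory
+ * barrier or from a random flush in ooo_mem().
+ */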
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without otherwise progressing.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
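+
+/*
+ * In short, the REMOTE_BARRIERS handshake works as follows: smp_mb_send()
+ * executes a local smp_mb(), raises reader_barrier[i] for each reader,
+ * busy-waits until the flag is cleared, then executes a second smp_mb().
+ * On the reader side, smp_mb_recv() either services the request (smp_mb()
+ * and clear the flag) or ignores it and breaks out, modeling a reader that
+ * never reaches a barrier-servicing point.
+ */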
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
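+
+/*
+ * ooo_mem() models out-of-order memory: it randomly flushes dirty cached
+ * values to memory and, on architectures with out-of-order cache bank loads
+ * (HAVE_OOO_CACHE_READ, i.e. Alpha), also randomly refreshes clean cached
+ * values from memory. On other architectures the read side falls back to
+ * smp_rmb(), reflecting that dependent reads are not reordered there.
+ */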
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
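+
+/*
+ * Illustrative sketch only: this inline is not part of the verified model
+ * and is never invoked. With the dependency tokens stripped, the read-lock
+ * body above reduces to the following nesting logic, written with the same
+ * helper macros the model already uses.
+ */
+inline sketch_read_lock_body(tmp, tmp2)
+{
+       tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+       if
+       :: !(tmp & RCU_GP_CTR_NEST_MASK) ->
+               /* outermost lock: snapshot the global generation (parity) */
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);
+               skip
+       :: else ->
+               /* nested lock: simply increment the nesting count */
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1);
+               skip
+       fi
+}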
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
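+
+/*
+ * How the tokens are used: each guarded statement in urcu_one_read() below
+ * consumes the tokens of the statements it depends on and produces its own
+ * token, letting Spin explore every interleaving the dependencies allow.
+ * The SK_ macros below are only an illustrative sketch of that mechanism
+ * (an assumption mirroring the helper macros defined earlier in this file);
+ * they are never used by the model.
+ */
+#define SK_PRODUCE_TOKENS(state, bits)         state = state | (bits)
+#define SK_CONSUME_TOKENS(state, deps, prod)   ((!((state) & (prod))) && (((state) & (deps)) == (deps)))
+#define SK_CLEAR_TOKENS(state, bits)           state = state & ~(bits)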
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier is modeled as executing
+                * only at points where the statements already issued form a
+                * program-order prefix, i.e. where its effect appears in
+                * program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read-unlock
+                        * model, given it is not needed there. The implementation keeps the
+                        * barrier unconditionally because the performance impact of adding a
+                        * branch to skip it in the common case would not justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reads urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops: a RAW dependency from
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+        * to
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * links one loop iteration to the next.
+        * _When the mb()s are in place_, they fully order the generation
+        * pointer read with respect to the active reader count read, which
+        * ensures execution does not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling into the next one's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. Reader and writer progress have to be tested
+                * separately, otherwise we could believe the writer is making
+                * progress while it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
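+
+/*
+ * Distilled sketch, for illustration only (this inline is never invoked by
+ * the model): after each of its two parity flips, the writer below waits
+ * until reader 0 is either quiescent or observed with the freshly flipped
+ * parity, passed here as cur_gp_val. The real writer interleaves these
+ * steps through the dependency tokens above.
+ */
+inline sketch_wait_for_reader(tmp2, cur_gp_val)
+{
+       do
+       :: tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+          if
+          :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+               skip    /* reader nested in the parity being waited out */
+          :: else ->
+               break   /* quiescent, or already sees the new parity */
+          fi
+       od
+}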
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local copy of the current parity so
+                                * we do not add non-existent dependencies on the
+                                * global GP update. Needed to test the single-flip
+                                * case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier invalidates the second read,
+                        * which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, under weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Declared after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.spin.input.trail b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_no_wmb.spin.input.trail
new file mode 100644 (file)
index 0000000..62969b9
--- /dev/null
@@ -0,0 +1,1434 @@
+-2:3:-2
+-4:-4:-4
+1:0:4177
+2:3:4097
+3:3:4100
+4:3:4100
+5:3:4103
+6:3:4111
+7:3:4111
+8:3:4114
+9:3:4120
+10:3:4124
+11:3:4124
+12:3:4127
+13:3:4137
+14:3:4145
+15:3:4145
+16:3:4148
+17:3:4154
+18:3:4158
+19:3:4158
+20:3:4161
+21:3:4167
+22:3:4171
+23:3:4172
+24:0:4177
+25:3:4174
+26:0:4177
+27:2:2511
+28:0:4177
+29:2:2517
+30:0:4177
+31:2:2518
+32:0:4177
+33:2:2520
+34:0:4177
+35:2:2521
+36:0:4177
+37:2:2522
+38:0:4177
+39:2:2523
+40:2:2524
+41:2:2528
+42:2:2529
+43:2:2537
+44:2:2538
+45:2:2542
+46:2:2543
+47:2:2551
+48:2:2556
+49:2:2560
+50:2:2561
+51:2:2569
+52:2:2570
+53:2:2574
+54:2:2575
+55:2:2569
+56:2:2570
+57:2:2574
+58:2:2575
+59:2:2583
+60:2:2588
+61:2:2595
+62:2:2596
+63:2:2603
+64:2:2608
+65:2:2615
+66:2:2616
+67:2:2615
+68:2:2616
+69:2:2623
+70:2:2633
+71:0:4177
+72:2:2522
+73:0:4177
+74:2:2685
+75:2:2686
+76:2:2687
+77:0:4177
+78:2:2522
+79:0:4177
+80:2:2692
+81:0:4177
+82:2:3306
+83:2:3307
+84:2:3311
+85:2:3315
+86:2:3316
+87:2:3320
+88:2:3325
+89:2:3333
+90:2:3337
+91:2:3338
+92:2:3333
+93:2:3334
+94:2:3342
+95:2:3349
+96:2:3356
+97:2:3357
+98:2:3364
+99:2:3369
+100:2:3376
+101:2:3377
+102:2:3376
+103:2:3377
+104:2:3384
+105:2:3388
+106:0:4177
+107:2:3393
+108:0:4177
+109:2:3394
+110:0:4177
+111:2:3395
+112:0:4177
+113:2:3396
+114:0:4177
+115:1:2
+116:0:4177
+117:2:3397
+118:0:4177
+119:1:8
+120:0:4177
+121:1:9
+122:0:4177
+123:2:3396
+124:0:4177
+125:1:10
+126:0:4177
+127:2:3397
+128:0:4177
+129:1:11
+130:0:4177
+131:2:3396
+132:0:4177
+133:1:12
+134:0:4177
+135:2:3397
+136:0:4177
+137:1:13
+138:0:4177
+139:2:3396
+140:0:4177
+141:1:14
+142:0:4177
+143:2:3397
+144:0:4177
+145:1:15
+146:0:4177
+147:1:16
+148:0:4177
+149:2:3396
+150:0:4177
+151:1:17
+152:0:4177
+153:2:3397
+154:0:4177
+155:1:26
+156:0:4177
+157:2:3396
+158:0:4177
+159:1:30
+160:1:31
+161:1:35
+162:1:39
+163:1:40
+164:1:44
+165:1:52
+166:1:53
+167:1:57
+168:1:61
+169:1:62
+170:1:57
+171:1:61
+172:1:62
+173:1:66
+174:1:73
+175:1:80
+176:1:81
+177:1:88
+178:1:93
+179:1:100
+180:1:101
+181:1:100
+182:1:101
+183:1:108
+184:1:112
+185:0:4177
+186:2:3397
+187:0:4177
+188:1:117
+189:0:4177
+190:2:3398
+191:0:4177
+192:2:3403
+193:0:4177
+194:2:3404
+195:0:4177
+196:2:3412
+197:2:3413
+198:2:3417
+199:2:3421
+200:2:3422
+201:2:3426
+202:2:3434
+203:2:3435
+204:2:3439
+205:2:3443
+206:2:3444
+207:2:3439
+208:2:3443
+209:2:3444
+210:2:3448
+211:2:3455
+212:2:3462
+213:2:3463
+214:2:3470
+215:2:3475
+216:2:3482
+217:2:3483
+218:2:3482
+219:2:3483
+220:2:3490
+221:2:3494
+222:0:4177
+223:2:2694
+224:2:3287
+225:0:4177
+226:2:2522
+227:0:4177
+228:2:2695
+229:0:4177
+230:2:2522
+231:0:4177
+232:2:2698
+233:2:2699
+234:2:2703
+235:2:2704
+236:2:2712
+237:2:2713
+238:2:2717
+239:2:2718
+240:2:2726
+241:2:2731
+242:2:2735
+243:2:2736
+244:2:2744
+245:2:2745
+246:2:2749
+247:2:2750
+248:2:2744
+249:2:2745
+250:2:2749
+251:2:2750
+252:2:2758
+253:2:2763
+254:2:2770
+255:2:2771
+256:2:2778
+257:2:2783
+258:2:2790
+259:2:2791
+260:2:2790
+261:2:2791
+262:2:2798
+263:2:2807
+264:0:4177
+265:2:2522
+266:0:4177
+267:2:2811
+268:2:2812
+269:2:2813
+270:2:2825
+271:2:2826
+272:2:2830
+273:2:2831
+274:2:2839
+275:2:2844
+276:2:2848
+277:2:2849
+278:2:2857
+279:2:2858
+280:2:2862
+281:2:2863
+282:2:2857
+283:2:2858
+284:2:2862
+285:2:2863
+286:2:2871
+287:2:2876
+288:2:2883
+289:2:2884
+290:2:2891
+291:2:2896
+292:2:2903
+293:2:2904
+294:2:2903
+295:2:2904
+296:2:2911
+297:2:2924
+298:2:2925
+299:0:4177
+300:2:2522
+301:0:4177
+302:2:2932
+303:2:2933
+304:2:2937
+305:2:2938
+306:2:2946
+307:2:2947
+308:2:2951
+309:2:2952
+310:2:2960
+311:2:2965
+312:2:2969
+313:2:2970
+314:2:2978
+315:2:2979
+316:2:2983
+317:2:2984
+318:2:2978
+319:2:2979
+320:2:2983
+321:2:2984
+322:2:2992
+323:2:2997
+324:2:3004
+325:2:3005
+326:2:3012
+327:2:3017
+328:2:3024
+329:2:3025
+330:2:3024
+331:2:3025
+332:2:3032
+333:0:4177
+334:2:2522
+335:0:4177
+336:2:3043
+337:2:3044
+338:2:3048
+339:2:3049
+340:2:3057
+341:2:3058
+342:2:3062
+343:2:3063
+344:2:3071
+345:2:3076
+346:2:3080
+347:2:3081
+348:2:3089
+349:2:3090
+350:2:3094
+351:2:3095
+352:2:3089
+353:2:3090
+354:2:3094
+355:2:3095
+356:2:3103
+357:2:3108
+358:2:3115
+359:2:3116
+360:2:3123
+361:2:3128
+362:2:3135
+363:2:3136
+364:2:3135
+365:2:3136
+366:2:3143
+367:2:3152
+368:0:4177
+369:2:2522
+370:0:4177
+371:2:3156
+372:2:3157
+373:2:3158
+374:2:3170
+375:2:3171
+376:2:3175
+377:2:3176
+378:2:3184
+379:2:3189
+380:2:3193
+381:2:3194
+382:2:3202
+383:2:3203
+384:2:3207
+385:2:3208
+386:2:3202
+387:2:3203
+388:2:3207
+389:2:3208
+390:2:3216
+391:2:3221
+392:2:3228
+393:2:3229
+394:2:3236
+395:2:3241
+396:2:3248
+397:2:3249
+398:2:3248
+399:2:3249
+400:2:3256
+401:2:3268
+402:2:3269
+403:0:4177
+404:2:2522
+405:0:4177
+406:2:3275
+407:0:4177
+408:2:3900
+409:2:3901
+410:2:3905
+411:2:3909
+412:2:3910
+413:2:3914
+414:2:3922
+415:2:3923
+416:2:3927
+417:2:3931
+418:2:3932
+419:2:3927
+420:2:3931
+421:2:3932
+422:2:3936
+423:2:3943
+424:2:3950
+425:2:3951
+426:2:3958
+427:2:3963
+428:2:3970
+429:2:3971
+430:2:3970
+431:2:3971
+432:2:3978
+433:2:3982
+434:0:4177
+435:2:3987
+436:0:4177
+437:2:3988
+438:0:4177
+439:2:3989
+440:0:4177
+441:2:3990
+442:0:4177
+443:1:26
+444:0:4177
+445:2:3991
+446:0:4177
+447:1:30
+448:1:31
+449:1:35
+450:1:39
+451:1:40
+452:1:44
+453:1:52
+454:1:53
+455:1:57
+456:1:61
+457:1:62
+458:1:57
+459:1:61
+460:1:62
+461:1:66
+462:1:73
+463:1:80
+464:1:81
+465:1:88
+466:1:93
+467:1:100
+468:1:101
+469:1:100
+470:1:101
+471:1:108
+472:1:112
+473:0:4177
+474:2:3990
+475:0:4177
+476:1:117
+477:0:4177
+478:2:3991
+479:0:4177
+480:2:3992
+481:0:4177
+482:2:3997
+483:0:4177
+484:2:3998
+485:0:4177
+486:2:4006
+487:2:4007
+488:2:4011
+489:2:4015
+490:2:4016
+491:2:4020
+492:2:4028
+493:2:4029
+494:2:4033
+495:2:4037
+496:2:4038
+497:2:4033
+498:2:4037
+499:2:4038
+500:2:4042
+501:2:4049
+502:2:4056
+503:2:4057
+504:2:4064
+505:2:4069
+506:2:4076
+507:2:4077
+508:2:4076
+509:2:4077
+510:2:4084
+511:2:4088
+512:0:4177
+513:2:3277
+514:2:3287
+515:0:4177
+516:2:2522
+517:0:4177
+518:2:3278
+519:2:3279
+520:0:4177
+521:2:2522
+522:0:4177
+523:2:3283
+524:0:4177
+525:2:3291
+526:0:4177
+527:2:2518
+528:0:4177
+529:2:2520
+530:0:4177
+531:2:2521
+532:0:4177
+533:2:2522
+534:0:4177
+535:2:2685
+536:2:2686
+537:2:2687
+538:0:4177
+539:2:2522
+540:0:4177
+541:2:2523
+542:2:2524
+543:2:2528
+544:2:2529
+545:2:2537
+546:2:2538
+547:2:2542
+548:2:2543
+549:2:2551
+550:2:2556
+551:2:2557
+552:2:2569
+553:2:2570
+554:2:2571
+555:2:2569
+556:2:2570
+557:2:2574
+558:2:2575
+559:2:2583
+560:2:2588
+561:2:2595
+562:2:2596
+563:2:2603
+564:2:2608
+565:2:2615
+566:2:2616
+567:2:2615
+568:2:2616
+569:2:2623
+570:2:2633
+571:0:4177
+572:2:2522
+573:0:4177
+574:2:2692
+575:0:4177
+576:2:3306
+577:2:3307
+578:2:3311
+579:2:3315
+580:2:3316
+581:2:3320
+582:2:3328
+583:2:3329
+584:2:3333
+585:2:3334
+586:2:3333
+587:2:3337
+588:2:3338
+589:2:3342
+590:2:3349
+591:2:3356
+592:2:3357
+593:2:3364
+594:2:3369
+595:2:3376
+596:2:3377
+597:2:3376
+598:2:3377
+599:2:3384
+600:2:3388
+601:0:4177
+602:2:3393
+603:0:4177
+604:2:3394
+605:0:4177
+606:2:3395
+607:0:4177
+608:2:3396
+609:0:4177
+610:1:26
+611:0:4177
+612:2:3397
+613:0:4177
+614:1:30
+615:1:31
+616:1:35
+617:1:39
+618:1:40
+619:1:44
+620:1:52
+621:1:53
+622:1:57
+623:1:61
+624:1:62
+625:1:57
+626:1:61
+627:1:62
+628:1:66
+629:1:73
+630:1:80
+631:1:81
+632:1:88
+633:1:93
+634:1:100
+635:1:101
+636:1:100
+637:1:101
+638:1:108
+639:1:112
+640:0:4177
+641:2:3396
+642:0:4177
+643:1:117
+644:0:4177
+645:2:3397
+646:0:4177
+647:2:3398
+648:0:4177
+649:2:3403
+650:0:4177
+651:2:3404
+652:0:4177
+653:2:3412
+654:2:3413
+655:2:3417
+656:2:3421
+657:2:3422
+658:2:3426
+659:2:3434
+660:2:3435
+661:2:3439
+662:2:3443
+663:2:3444
+664:2:3439
+665:2:3443
+666:2:3444
+667:2:3448
+668:2:3455
+669:2:3462
+670:2:3463
+671:2:3470
+672:2:3475
+673:2:3482
+674:2:3483
+675:2:3482
+676:2:3483
+677:2:3490
+678:2:3494
+679:0:4177
+680:2:2694
+681:2:3287
+682:0:4177
+683:2:2522
+684:0:4177
+685:2:2695
+686:0:4177
+687:2:2522
+688:0:4177
+689:2:2698
+690:2:2699
+691:2:2703
+692:2:2704
+693:2:2712
+694:2:2713
+695:2:2717
+696:2:2718
+697:2:2726
+698:2:2731
+699:2:2735
+700:2:2736
+701:2:2744
+702:2:2745
+703:2:2749
+704:2:2750
+705:2:2744
+706:2:2745
+707:2:2749
+708:2:2750
+709:2:2758
+710:2:2763
+711:2:2770
+712:2:2771
+713:2:2778
+714:2:2783
+715:2:2790
+716:2:2791
+717:2:2790
+718:2:2791
+719:2:2798
+720:2:2807
+721:0:4177
+722:2:2522
+723:0:4177
+724:2:2811
+725:2:2812
+726:2:2813
+727:2:2825
+728:2:2826
+729:2:2830
+730:2:2831
+731:2:2839
+732:2:2844
+733:2:2848
+734:2:2849
+735:2:2857
+736:2:2858
+737:2:2862
+738:2:2863
+739:2:2857
+740:2:2858
+741:2:2862
+742:2:2863
+743:2:2871
+744:2:2876
+745:2:2883
+746:2:2884
+747:2:2891
+748:2:2896
+749:2:2903
+750:2:2904
+751:2:2903
+752:2:2904
+753:2:2911
+754:2:2924
+755:2:2925
+756:0:4177
+757:2:2522
+758:0:4177
+759:2:2932
+760:2:2933
+761:2:2937
+762:2:2938
+763:2:2946
+764:2:2947
+765:2:2951
+766:2:2952
+767:2:2960
+768:2:2965
+769:2:2969
+770:2:2970
+771:2:2978
+772:2:2979
+773:2:2983
+774:2:2984
+775:2:2978
+776:2:2979
+777:2:2983
+778:2:2984
+779:2:2992
+780:2:2997
+781:2:3004
+782:2:3005
+783:2:3012
+784:2:3017
+785:2:3024
+786:2:3025
+787:2:3024
+788:2:3025
+789:2:3032
+790:0:4177
+791:2:2522
+792:0:4177
+793:2:3043
+794:2:3044
+795:2:3048
+796:2:3049
+797:2:3057
+798:2:3058
+799:2:3062
+800:2:3063
+801:2:3071
+802:2:3076
+803:2:3080
+804:2:3081
+805:2:3089
+806:2:3090
+807:2:3094
+808:2:3095
+809:2:3089
+810:2:3090
+811:2:3094
+812:2:3095
+813:2:3103
+814:2:3108
+815:2:3115
+816:2:3116
+817:2:3123
+818:2:3128
+819:2:3135
+820:2:3136
+821:2:3135
+822:2:3136
+823:2:3143
+824:2:3152
+825:0:4177
+826:2:2522
+827:0:4177
+828:2:3156
+829:2:3157
+830:2:3158
+831:2:3170
+832:2:3171
+833:2:3175
+834:2:3176
+835:2:3184
+836:2:3189
+837:2:3193
+838:2:3194
+839:2:3202
+840:2:3203
+841:2:3207
+842:2:3208
+843:2:3202
+844:2:3203
+845:2:3207
+846:2:3208
+847:2:3216
+848:2:3221
+849:2:3228
+850:2:3229
+851:2:3236
+852:2:3241
+853:2:3248
+854:2:3249
+855:2:3248
+856:2:3249
+857:2:3256
+858:2:3268
+859:2:3269
+860:0:4177
+861:2:2522
+862:0:4177
+863:2:3275
+864:0:4177
+865:2:3900
+866:2:3901
+867:2:3905
+868:2:3909
+869:2:3910
+870:2:3914
+871:2:3922
+872:2:3923
+873:2:3927
+874:2:3931
+875:2:3932
+876:2:3927
+877:2:3931
+878:2:3932
+879:2:3936
+880:2:3943
+881:2:3950
+882:2:3951
+883:2:3958
+884:2:3963
+885:2:3970
+886:2:3971
+887:2:3970
+888:2:3971
+889:2:3978
+890:2:3982
+891:0:4177
+892:2:3987
+893:0:4177
+894:2:3988
+895:0:4177
+896:2:3989
+897:0:4177
+898:2:3990
+899:0:4177
+900:1:26
+901:0:4177
+902:2:3991
+903:0:4177
+904:1:30
+905:1:31
+906:1:35
+907:1:39
+908:1:40
+909:1:44
+910:1:52
+911:1:53
+912:1:57
+913:1:61
+914:1:62
+915:1:57
+916:1:61
+917:1:62
+918:1:66
+919:1:73
+920:1:80
+921:1:81
+922:1:88
+923:1:93
+924:1:100
+925:1:101
+926:1:100
+927:1:101
+928:1:108
+929:1:112
+930:0:4177
+931:2:3990
+932:0:4177
+933:1:117
+934:0:4177
+935:2:3991
+936:0:4177
+937:2:3992
+938:0:4177
+939:2:3997
+940:0:4177
+941:2:3998
+942:0:4177
+943:2:4006
+944:2:4007
+945:2:4011
+946:2:4015
+947:2:4016
+948:2:4020
+949:2:4028
+950:2:4029
+951:2:4033
+952:2:4037
+953:2:4038
+954:2:4033
+955:2:4037
+956:2:4038
+957:2:4042
+958:2:4049
+959:2:4056
+960:2:4057
+961:2:4064
+962:2:4069
+963:2:4076
+964:2:4077
+965:2:4076
+966:2:4077
+967:2:4084
+968:2:4088
+969:0:4177
+970:2:3277
+971:2:3287
+972:0:4177
+973:2:2522
+974:0:4177
+975:2:3278
+976:2:3279
+977:0:4177
+978:2:2522
+979:0:4177
+980:2:3283
+981:0:4177
+982:2:3291
+983:0:4177
+984:2:2518
+985:0:4177
+986:2:2520
+987:0:4177
+988:2:2521
+989:0:4177
+990:2:2522
+991:0:4177
+992:2:2523
+993:2:2524
+994:2:2528
+995:2:2529
+996:2:2537
+997:2:2538
+998:2:2542
+999:2:2543
+1000:2:2551
+1001:2:2556
+1002:2:2560
+1003:2:2561
+1004:2:2569
+1005:2:2570
+1006:2:2574
+1007:2:2575
+1008:2:2569
+1009:2:2570
+1010:2:2571
+1011:2:2583
+1012:2:2588
+1013:2:2595
+1014:2:2596
+1015:2:2603
+1016:2:2608
+1017:2:2615
+1018:2:2616
+1019:2:2615
+1020:2:2616
+1021:2:2623
+1022:2:2633
+1023:0:4177
+1024:2:2522
+1025:0:4177
+1026:2:2685
+1027:2:2686
+1028:2:2687
+1029:0:4177
+1030:2:2522
+1031:0:4177
+1032:2:2692
+1033:0:4177
+1034:1:118
+1035:0:4177
+1036:1:120
+1037:0:4177
+1038:1:19
+1039:0:4177
+1040:1:126
+1041:1:127
+1042:1:131
+1043:1:132
+1044:1:140
+1045:1:141
+1046:1:145
+1047:1:146
+1048:1:154
+1049:1:159
+1050:1:163
+1051:1:164
+1052:1:172
+1053:1:173
+1054:1:177
+1055:1:178
+1056:1:172
+1057:1:173
+1058:1:177
+1059:1:178
+1060:1:186
+1061:1:191
+1062:1:198
+1063:1:199
+1064:1:206
+1065:1:211
+1066:1:218
+1067:1:219
+1068:1:218
+1069:1:219
+1070:1:226
+1071:0:4177
+1072:1:15
+1073:0:4177
+1074:1:16
+1075:0:4177
+1076:1:17
+1077:0:4177
+1078:1:118
+1079:0:4177
+1080:1:120
+1081:0:4177
+1082:1:19
+1083:0:4177
+1084:1:237
+1085:1:238
+1086:0:4177
+1087:1:15
+1088:0:4177
+1089:1:16
+1090:0:4177
+1091:1:17
+1092:0:4177
+1093:1:118
+1094:0:4177
+1095:1:120
+1096:0:4177
+1097:1:19
+1098:0:4177
+1099:1:244
+1100:1:245
+1101:1:249
+1102:1:250
+1103:1:258
+1104:1:259
+1105:1:263
+1106:1:264
+1107:1:272
+1108:1:277
+1109:1:281
+1110:1:282
+1111:1:290
+1112:1:291
+1113:1:295
+1114:1:296
+1115:1:290
+1116:1:291
+1117:1:295
+1118:1:296
+1119:1:304
+1120:1:309
+1121:1:316
+1122:1:317
+1123:1:324
+1124:1:329
+1125:1:336
+1126:1:337
+1127:1:336
+1128:1:337
+1129:1:344
+1130:0:4177
+1131:1:15
+1132:0:4177
+1133:1:16
+1134:0:4177
+1135:1:17
+1136:0:4177
+1137:1:118
+1138:0:4177
+1139:1:120
+1140:0:4177
+1141:1:19
+1142:0:4177
+1143:1:355
+1144:1:356
+1145:1:360
+1146:1:361
+1147:1:369
+1148:1:370
+1149:1:374
+1150:1:375
+1151:1:383
+1152:1:388
+1153:1:392
+1154:1:393
+1155:1:401
+1156:1:402
+1157:1:406
+1158:1:407
+1159:1:401
+1160:1:402
+1161:1:406
+1162:1:407
+1163:1:415
+1164:1:420
+1165:1:427
+1166:1:428
+1167:1:435
+1168:1:440
+1169:1:447
+1170:1:448
+1171:1:447
+1172:1:448
+1173:1:455
+1174:1:464
+1175:0:4177
+1176:1:15
+1177:0:4177
+1178:1:16
+1179:0:4177
+1180:1:17
+1181:0:4177
+1182:1:118
+1183:0:4177
+1184:1:120
+1185:0:4177
+1186:1:19
+1187:0:4177
+1188:1:584
+1189:1:585
+1190:1:589
+1191:1:590
+1192:1:598
+1193:1:599
+1194:1:600
+1195:1:612
+1196:1:617
+1197:1:621
+1198:1:622
+1199:1:630
+1200:1:631
+1201:1:635
+1202:1:636
+1203:1:630
+1204:1:631
+1205:1:635
+1206:1:636
+1207:1:644
+1208:1:649
+1209:1:656
+1210:1:657
+1211:1:664
+1212:1:669
+1213:1:676
+1214:1:677
+1215:1:676
+1216:1:677
+1217:1:684
+1218:0:4177
+1219:1:15
+1220:0:4177
+1221:1:16
+1222:0:4177
+1223:1:17
+1224:0:4177
+1225:1:118
+1226:0:4177
+1227:1:120
+1228:0:4177
+1229:1:19
+1230:0:4177
+1231:1:695
+1232:1:698
+1233:1:699
+1234:0:4177
+1235:1:15
+1236:0:4177
+1237:1:16
+1238:0:4177
+1239:1:17
+1240:0:4177
+1241:1:118
+1242:0:4177
+1243:1:120
+1244:0:4177
+1245:1:19
+1246:0:4177
+1247:1:702
+1248:1:703
+1249:1:707
+1250:1:708
+1251:1:716
+1252:1:717
+1253:1:721
+1254:1:722
+1255:1:730
+1256:1:735
+1257:1:739
+1258:1:740
+1259:1:748
+1260:1:749
+1261:1:753
+1262:1:754
+1263:1:748
+1264:1:749
+1265:1:753
+1266:1:754
+1267:1:762
+1268:1:767
+1269:1:774
+1270:1:775
+1271:1:782
+1272:1:787
+1273:1:794
+1274:1:795
+1275:1:794
+1276:1:795
+1277:1:802
+1278:0:4177
+1279:1:15
+1280:0:4177
+1281:1:16
+1282:0:4177
+1283:1:17
+1284:0:4177
+1285:1:118
+1286:0:4177
+1287:1:120
+1288:0:4177
+1289:1:19
+1290:0:4177
+1291:1:926
+1292:1:927
+1293:1:931
+1294:1:932
+1295:1:940
+1296:1:941
+1297:1:945
+1298:1:946
+1299:1:954
+1300:1:959
+1301:1:963
+1302:1:964
+1303:1:972
+1304:1:973
+1305:1:977
+1306:1:978
+1307:1:972
+1308:1:973
+1309:1:977
+1310:1:978
+1311:1:986
+1312:1:991
+1313:1:998
+1314:1:999
+1315:1:1006
+1316:1:1011
+1317:1:1018
+1318:1:1019
+1319:1:1018
+1320:1:1019
+1321:1:1026
+1322:1:1035
+1323:1:1039
+1324:0:4177
+1325:1:15
+1326:0:4177
+1327:1:16
+1328:0:4177
+1329:1:17
+1330:0:4177
+1331:1:118
+1332:0:4177
+1333:1:120
+1334:0:4177
+1335:1:19
+1336:0:4177
+1337:1:1040
+1338:1:1041
+1339:1:1045
+1340:1:1046
+1341:1:1054
+1342:1:1055
+1343:1:1056
+1344:1:1068
+1345:1:1073
+1346:1:1077
+1347:1:1078
+1348:1:1086
+1349:1:1087
+1350:1:1091
+1351:1:1092
+1352:1:1086
+1353:1:1087
+1354:1:1091
+1355:1:1092
+1356:1:1100
+1357:1:1105
+1358:1:1112
+1359:1:1113
+1360:1:1120
+1361:1:1125
+1362:1:1132
+1363:1:1133
+1364:1:1132
+1365:1:1133
+1366:1:1140
+1367:0:4177
+1368:1:15
+1369:0:4177
+1370:1:16
+1371:0:4177
+1372:1:17
+1373:0:4177
+1374:1:118
+1375:0:4177
+1376:1:120
+1377:0:4177
+1378:1:19
+1379:0:4177
+1380:1:1151
+1381:0:4177
+1382:1:2417
+1383:1:2424
+1384:1:2425
+1385:1:2432
+1386:1:2437
+1387:1:2444
+1388:1:2445
+1389:1:2444
+1390:1:2445
+1391:1:2452
+1392:1:2456
+1393:0:4177
+1394:2:3306
+1395:2:3307
+1396:2:3311
+1397:2:3315
+1398:2:3316
+1399:2:3320
+1400:2:3325
+1401:2:3333
+1402:2:3337
+1403:2:3338
+1404:2:3333
+1405:2:3334
+1406:2:3342
+1407:2:3349
+1408:2:3356
+1409:2:3357
+1410:2:3364
+1411:2:3369
+1412:2:3376
+1413:2:3377
+1414:2:3376
+1415:2:3377
+1416:2:3384
+1417:2:3388
+1418:0:4177
+1419:2:3393
+1420:0:4177
+1421:2:3394
+1422:0:4177
+1423:2:3395
+1424:0:4177
+1425:2:3396
+1426:0:4177
+1427:1:1153
+1428:1:1154
+1429:0:4175
+1430:2:3397
+1431:0:4181
+1432:1:2165
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.log b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.log
new file mode 100644 (file)
index 0000000..3fc8991
--- /dev/null
@@ -0,0 +1,635 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_single_flip.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    9112 States=    1e+06 Transitions=  7.1e+06 Memory=   550.432        t=   17.4 R=   6e+04
+Depth=    9112 States=    2e+06 Transitions= 1.64e+07 Memory=   634.318        t=   41.8 R=   5e+04
+Depth=    9112 States=    3e+06 Transitions= 2.63e+07 Memory=   718.303        t=   68.2 R=   4e+04
+pan: resizing hashtable to -w22..  done
+Depth=    9112 States=    4e+06 Transitions= 3.56e+07 Memory=   833.311        t=   93.2 R=   4e+04
+Depth=    9112 States=    5e+06 Transitions= 4.66e+07 Memory=   917.295        t=    122 R=   4e+04
+Depth=    9112 States=    6e+06 Transitions= 5.77e+07 Memory=  1001.279        t=    152 R=   4e+04
+Depth=    9112 States=    7e+06 Transitions= 6.72e+07 Memory=  1085.264        t=    177 R=   4e+04
+Depth=    9112 States=    8e+06 Transitions= 7.41e+07 Memory=  1169.151        t=    194 R=   4e+04
+Depth=    9112 States=    9e+06 Transitions= 8.11e+07 Memory=  1253.135        t=    212 R=   4e+04
+pan: resizing hashtable to -w24..  done
+Depth=    9112 States=    1e+07 Transitions= 8.81e+07 Memory=  1461.115        t=    231 R=   4e+04
+Depth=    9112 States=  1.1e+07 Transitions= 9.84e+07 Memory=  1545.100        t=    259 R=   4e+04
+Depth=    9112 States=  1.2e+07 Transitions= 1.08e+08 Memory=  1629.084        t=    282 R=   4e+04
+Depth=    9112 States=  1.3e+07 Transitions= 1.16e+08 Memory=  1713.068        t=    304 R=   4e+04
+Depth=    9112 States=  1.4e+07 Transitions= 1.25e+08 Memory=  1797.053        t=    329 R=   4e+04
+Depth=    9112 States=  1.5e+07 Transitions= 1.35e+08 Memory=  1881.037        t=    352 R=   4e+04
+Depth=    9278 States=  1.6e+07 Transitions= 1.45e+08 Memory=  1964.924        t=    382 R=   4e+04
+Depth=    9283 States=  1.7e+07 Transitions= 1.56e+08 Memory=  2048.908        t=    409 R=   4e+04
+Depth=    9283 States=  1.8e+07 Transitions= 1.66e+08 Memory=  2132.893        t=    437 R=   4e+04
+Depth=    9283 States=  1.9e+07 Transitions= 1.76e+08 Memory=  2216.877        t=    464 R=   4e+04
+Depth=    9283 States=    2e+07 Transitions= 1.87e+08 Memory=  2300.861        t=    493 R=   4e+04
+Depth=    9283 States=  2.1e+07 Transitions= 1.97e+08 Memory=  2384.846        t=    521 R=   4e+04
+Depth=    9283 States=  2.2e+07 Transitions= 2.08e+08 Memory=  2468.830        t=    549 R=   4e+04
+Depth=    9283 States=  2.3e+07 Transitions= 2.17e+08 Memory=  2552.717        t=    572 R=   4e+04
+Depth=    9283 States=  2.4e+07 Transitions= 2.26e+08 Memory=  2636.701        t=    596 R=   4e+04
+Depth=    9283 States=  2.5e+07 Transitions= 2.37e+08 Memory=  2720.686        t=    627 R=   4e+04
+Depth=    9283 States=  2.6e+07 Transitions= 2.49e+08 Memory=  2804.670        t=    659 R=   4e+04
+Depth=    9283 States=  2.7e+07 Transitions=  2.6e+08 Memory=  2888.654        t=    689 R=   4e+04
+Depth=    9283 States=  2.8e+07 Transitions= 2.71e+08 Memory=  2972.639        t=    718 R=   4e+04
+Depth=    9283 States=  2.9e+07 Transitions=  2.8e+08 Memory=  3056.526        t=    741 R=   4e+04
+Depth=    9283 States=    3e+07 Transitions= 2.89e+08 Memory=  3140.510        t=    764 R=   4e+04
+Depth=    9283 States=  3.1e+07 Transitions= 2.99e+08 Memory=  3224.494        t=    791 R=   4e+04
+Depth=    9283 States=  3.2e+07 Transitions= 3.09e+08 Memory=  3308.479        t=    818 R=   4e+04
+Depth=    9283 States=  3.3e+07 Transitions= 3.19e+08 Memory=  3392.463        t=    846 R=   4e+04
+Depth=    9283 States=  3.4e+07 Transitions= 3.29e+08 Memory=  3476.447        t=    873 R=   4e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9283 States=  3.5e+07 Transitions= 3.39e+08 Memory=  4056.416        t=    908 R=   4e+04
+Depth=    9283 States=  3.6e+07 Transitions= 3.49e+08 Memory=  4140.401        t=    934 R=   4e+04
+Depth=    9283 States=  3.7e+07 Transitions= 3.59e+08 Memory=  4224.385        t=    959 R=   4e+04
+pan: claim violated! (at depth 1298)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 9283, errors: 1
+ 37684654 states, stored
+3.2905753e+08 states, matched
+3.6674218e+08 transitions (= stored+matched)
+5.3145922e+09 atomic steps
+hash conflicts: 2.6456917e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 4168.911      equivalent memory usage for states (stored*(State-vector + overhead))
+ 3314.050      actual memory usage for states (compression: 79.49%)
+               state-vector as stored = 64 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+    1.910      memory lost to fragmentation
+ 4281.904      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 272, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 77, "(1)"
+       line 253, "pan.___", state 85, "(1)"
+       line 257, "pan.___", state 97, "(1)"
+       line 261, "pan.___", state 105, "(1)"
+       line 411, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 195, "(1)"
+       line 257, "pan.___", state 215, "(1)"
+       line 261, "pan.___", state 223, "(1)"
+       line 691, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 313, "(1)"
+       line 257, "pan.___", state 333, "(1)"
+       line 261, "pan.___", state 341, "(1)"
+       line 411, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 424, "(1)"
+       line 257, "pan.___", state 444, "(1)"
+       line 261, "pan.___", state 452, "(1)"
+       line 411, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 475, "(1)"
+       line 411, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 476, "else"
+       line 411, "pan.___", state 479, "(1)"
+       line 415, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 489, "(1)"
+       line 415, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 490, "else"
+       line 415, "pan.___", state 493, "(1)"
+       line 415, "pan.___", state 494, "(1)"
+       line 415, "pan.___", state 494, "(1)"
+       line 413, "pan.___", state 499, "((i<1))"
+       line 413, "pan.___", state 499, "((i>=1))"
+       line 420, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 507, "(1)"
+       line 420, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 508, "else"
+       line 420, "pan.___", state 511, "(1)"
+       line 420, "pan.___", state 512, "(1)"
+       line 420, "pan.___", state 512, "(1)"
+       line 424, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 521, "(1)"
+       line 424, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 522, "else"
+       line 424, "pan.___", state 525, "(1)"
+       line 424, "pan.___", state 526, "(1)"
+       line 424, "pan.___", state 526, "(1)"
+       line 422, "pan.___", state 531, "((i<2))"
+       line 422, "pan.___", state 531, "((i>=2))"
+       line 249, "pan.___", state 537, "(1)"
+       line 253, "pan.___", state 545, "(1)"
+       line 253, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 546, "else"
+       line 251, "pan.___", state 551, "((i<1))"
+       line 251, "pan.___", state 551, "((i>=1))"
+       line 257, "pan.___", state 557, "(1)"
+       line 257, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 558, "else"
+       line 261, "pan.___", state 565, "(1)"
+       line 261, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 566, "else"
+       line 259, "pan.___", state 571, "((i<2))"
+       line 259, "pan.___", state 571, "((i>=2))"
+       line 266, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 575, "else"
+       line 431, "pan.___", state 577, "(1)"
+       line 431, "pan.___", state 577, "(1)"
+       line 691, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 691, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 691, "pan.___", state 582, "(1)"
+       line 411, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 653, "(1)"
+       line 257, "pan.___", state 673, "(1)"
+       line 261, "pan.___", state 681, "(1)"
+       line 411, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 771, "(1)"
+       line 257, "pan.___", state 791, "(1)"
+       line 261, "pan.___", state 799, "(1)"
+       line 411, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 820, "(1)"
+       line 411, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 821, "else"
+       line 411, "pan.___", state 824, "(1)"
+       line 415, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 834, "(1)"
+       line 415, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 835, "else"
+       line 415, "pan.___", state 838, "(1)"
+       line 415, "pan.___", state 839, "(1)"
+       line 415, "pan.___", state 839, "(1)"
+       line 413, "pan.___", state 844, "((i<1))"
+       line 413, "pan.___", state 844, "((i>=1))"
+       line 420, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 852, "(1)"
+       line 420, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 853, "else"
+       line 420, "pan.___", state 856, "(1)"
+       line 420, "pan.___", state 857, "(1)"
+       line 420, "pan.___", state 857, "(1)"
+       line 424, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 866, "(1)"
+       line 424, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 867, "else"
+       line 424, "pan.___", state 870, "(1)"
+       line 424, "pan.___", state 871, "(1)"
+       line 424, "pan.___", state 871, "(1)"
+       line 422, "pan.___", state 876, "((i<2))"
+       line 422, "pan.___", state 876, "((i>=2))"
+       line 249, "pan.___", state 882, "(1)"
+       line 253, "pan.___", state 890, "(1)"
+       line 253, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 891, "else"
+       line 251, "pan.___", state 896, "((i<1))"
+       line 251, "pan.___", state 896, "((i>=1))"
+       line 257, "pan.___", state 902, "(1)"
+       line 257, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 903, "else"
+       line 261, "pan.___", state 910, "(1)"
+       line 261, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 911, "else"
+       line 259, "pan.___", state 916, "((i<2))"
+       line 259, "pan.___", state 916, "((i>=2))"
+       line 266, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 920, "else"
+       line 431, "pan.___", state 922, "(1)"
+       line 431, "pan.___", state 922, "(1)"
+       line 699, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 995, "(1)"
+       line 257, "pan.___", state 1015, "(1)"
+       line 261, "pan.___", state 1023, "(1)"
+       line 411, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1109, "(1)"
+       line 257, "pan.___", state 1129, "(1)"
+       line 261, "pan.___", state 1137, "(1)"
+       line 411, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1224, "(1)"
+       line 257, "pan.___", state 1244, "(1)"
+       line 261, "pan.___", state 1252, "(1)"
+       line 411, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1335, "(1)"
+       line 257, "pan.___", state 1355, "(1)"
+       line 261, "pan.___", state 1363, "(1)"
+       line 411, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1451, "(1)"
+       line 257, "pan.___", state 1471, "(1)"
+       line 261, "pan.___", state 1479, "(1)"
+       line 411, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1562, "(1)"
+       line 257, "pan.___", state 1582, "(1)"
+       line 261, "pan.___", state 1590, "(1)"
+       line 411, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1676, "(1)"
+       line 257, "pan.___", state 1696, "(1)"
+       line 261, "pan.___", state 1704, "(1)"
+       line 738, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1794, "(1)"
+       line 257, "pan.___", state 1814, "(1)"
+       line 261, "pan.___", state 1822, "(1)"
+       line 411, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1905, "(1)"
+       line 257, "pan.___", state 1925, "(1)"
+       line 261, "pan.___", state 1933, "(1)"
+       line 411, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1956, "(1)"
+       line 411, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 1957, "else"
+       line 411, "pan.___", state 1960, "(1)"
+       line 415, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1970, "(1)"
+       line 415, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 1971, "else"
+       line 415, "pan.___", state 1974, "(1)"
+       line 415, "pan.___", state 1975, "(1)"
+       line 415, "pan.___", state 1975, "(1)"
+       line 413, "pan.___", state 1980, "((i<1))"
+       line 413, "pan.___", state 1980, "((i>=1))"
+       line 420, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1988, "(1)"
+       line 420, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 1989, "else"
+       line 420, "pan.___", state 1992, "(1)"
+       line 420, "pan.___", state 1993, "(1)"
+       line 420, "pan.___", state 1993, "(1)"
+       line 424, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2002, "(1)"
+       line 424, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2003, "else"
+       line 424, "pan.___", state 2006, "(1)"
+       line 424, "pan.___", state 2007, "(1)"
+       line 424, "pan.___", state 2007, "(1)"
+       line 422, "pan.___", state 2012, "((i<2))"
+       line 422, "pan.___", state 2012, "((i>=2))"
+       line 249, "pan.___", state 2018, "(1)"
+       line 253, "pan.___", state 2026, "(1)"
+       line 253, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2027, "else"
+       line 251, "pan.___", state 2032, "((i<1))"
+       line 251, "pan.___", state 2032, "((i>=1))"
+       line 257, "pan.___", state 2038, "(1)"
+       line 257, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2039, "else"
+       line 261, "pan.___", state 2046, "(1)"
+       line 261, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2047, "else"
+       line 259, "pan.___", state 2052, "((i<2))"
+       line 259, "pan.___", state 2052, "((i>=2))"
+       line 266, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2056, "else"
+       line 431, "pan.___", state 2058, "(1)"
+       line 431, "pan.___", state 2058, "(1)"
+       line 738, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 738, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 738, "pan.___", state 2063, "(1)"
+       line 411, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2134, "(1)"
+       line 257, "pan.___", state 2154, "(1)"
+       line 261, "pan.___", state 2162, "(1)"
+       line 411, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2251, "(1)"
+       line 257, "pan.___", state 2271, "(1)"
+       line 261, "pan.___", state 2279, "(1)"
+       line 411, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2362, "(1)"
+       line 257, "pan.___", state 2382, "(1)"
+       line 261, "pan.___", state 2390, "(1)"
+       line 249, "pan.___", state 2421, "(1)"
+       line 257, "pan.___", state 2441, "(1)"
+       line 261, "pan.___", state 2449, "(1)"
+       line 249, "pan.___", state 2464, "(1)"
+       line 257, "pan.___", state 2484, "(1)"
+       line 261, "pan.___", state 2492, "(1)"
+       line 898, "pan.___", state 2509, "-end-"
+       (221 of 2509 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 22, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 36, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 54, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 86, "(1)"
+       line 253, "pan.___", state 94, "(1)"
+       line 257, "pan.___", state 106, "(1)"
+       line 272, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 144, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 157, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 197, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 211, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 229, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 243, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 261, "(1)"
+       line 253, "pan.___", state 269, "(1)"
+       line 257, "pan.___", state 281, "(1)"
+       line 261, "pan.___", state 289, "(1)"
+       line 415, "pan.___", state 324, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 342, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 356, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 382, "(1)"
+       line 257, "pan.___", state 394, "(1)"
+       line 261, "pan.___", state 402, "(1)"
+       line 411, "pan.___", state 430, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 444, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 462, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 476, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 494, "(1)"
+       line 253, "pan.___", state 502, "(1)"
+       line 257, "pan.___", state 514, "(1)"
+       line 261, "pan.___", state 522, "(1)"
+       line 411, "pan.___", state 541, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 543, "(1)"
+       line 411, "pan.___", state 544, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 544, "else"
+       line 411, "pan.___", state 547, "(1)"
+       line 415, "pan.___", state 555, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 557, "(1)"
+       line 415, "pan.___", state 558, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 558, "else"
+       line 415, "pan.___", state 561, "(1)"
+       line 415, "pan.___", state 562, "(1)"
+       line 415, "pan.___", state 562, "(1)"
+       line 413, "pan.___", state 567, "((i<1))"
+       line 413, "pan.___", state 567, "((i>=1))"
+       line 420, "pan.___", state 573, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 575, "(1)"
+       line 420, "pan.___", state 576, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 576, "else"
+       line 420, "pan.___", state 579, "(1)"
+       line 420, "pan.___", state 580, "(1)"
+       line 420, "pan.___", state 580, "(1)"
+       line 424, "pan.___", state 587, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 589, "(1)"
+       line 424, "pan.___", state 590, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 590, "else"
+       line 424, "pan.___", state 593, "(1)"
+       line 424, "pan.___", state 594, "(1)"
+       line 424, "pan.___", state 594, "(1)"
+       line 422, "pan.___", state 599, "((i<2))"
+       line 422, "pan.___", state 599, "((i>=2))"
+       line 249, "pan.___", state 605, "(1)"
+       line 253, "pan.___", state 613, "(1)"
+       line 253, "pan.___", state 614, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 614, "else"
+       line 251, "pan.___", state 619, "((i<1))"
+       line 251, "pan.___", state 619, "((i>=1))"
+       line 257, "pan.___", state 625, "(1)"
+       line 257, "pan.___", state 626, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 626, "else"
+       line 261, "pan.___", state 633, "(1)"
+       line 261, "pan.___", state 634, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 634, "else"
+       line 259, "pan.___", state 639, "((i<2))"
+       line 259, "pan.___", state 639, "((i>=2))"
+       line 266, "pan.___", state 643, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 643, "else"
+       line 431, "pan.___", state 645, "(1)"
+       line 431, "pan.___", state 645, "(1)"
+       line 1117, "pan.___", state 649, "_proc_urcu_writer = (_proc_urcu_writer|(1<<10))"
+       line 411, "pan.___", state 654, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 656, "(1)"
+       line 411, "pan.___", state 657, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 657, "else"
+       line 411, "pan.___", state 660, "(1)"
+       line 415, "pan.___", state 668, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 670, "(1)"
+       line 415, "pan.___", state 671, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 671, "else"
+       line 415, "pan.___", state 674, "(1)"
+       line 415, "pan.___", state 675, "(1)"
+       line 415, "pan.___", state 675, "(1)"
+       line 413, "pan.___", state 680, "((i<1))"
+       line 413, "pan.___", state 680, "((i>=1))"
+       line 420, "pan.___", state 686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 688, "(1)"
+       line 420, "pan.___", state 689, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 689, "else"
+       line 420, "pan.___", state 692, "(1)"
+       line 420, "pan.___", state 693, "(1)"
+       line 420, "pan.___", state 693, "(1)"
+       line 424, "pan.___", state 700, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 702, "(1)"
+       line 424, "pan.___", state 703, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 703, "else"
+       line 424, "pan.___", state 706, "(1)"
+       line 424, "pan.___", state 707, "(1)"
+       line 424, "pan.___", state 707, "(1)"
+       line 422, "pan.___", state 712, "((i<2))"
+       line 422, "pan.___", state 712, "((i>=2))"
+       line 249, "pan.___", state 718, "(1)"
+       line 253, "pan.___", state 726, "(1)"
+       line 253, "pan.___", state 727, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 727, "else"
+       line 251, "pan.___", state 732, "((i<1))"
+       line 251, "pan.___", state 732, "((i>=1))"
+       line 257, "pan.___", state 738, "(1)"
+       line 257, "pan.___", state 739, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 739, "else"
+       line 261, "pan.___", state 746, "(1)"
+       line 261, "pan.___", state 747, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 747, "else"
+       line 259, "pan.___", state 752, "((i<2))"
+       line 259, "pan.___", state 752, "((i>=2))"
+       line 266, "pan.___", state 756, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 756, "else"
+       line 431, "pan.___", state 758, "(1)"
+       line 431, "pan.___", state 758, "(1)"
+       line 1133, "pan.___", state 763, "_proc_urcu_writer = (_proc_urcu_writer|(1<<11))"
+       line 1128, "pan.___", state 764, "(((tmp2&((1<<7)-1))&&((tmp2^0)&(1<<7))))"
+       line 1128, "pan.___", state 764, "else"
+       line 1153, "pan.___", state 768, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<12)|(1<<11))))"
+       line 272, "pan.___", state 799, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 823, "(1)"
+       line 284, "pan.___", state 830, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 846, "(1)"
+       line 253, "pan.___", state 854, "(1)"
+       line 257, "pan.___", state 866, "(1)"
+       line 261, "pan.___", state 874, "(1)"
+       line 272, "pan.___", state 905, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 914, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 927, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 936, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 952, "(1)"
+       line 253, "pan.___", state 960, "(1)"
+       line 257, "pan.___", state 972, "(1)"
+       line 261, "pan.___", state 980, "(1)"
+       line 276, "pan.___", state 1006, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1019, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1028, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1044, "(1)"
+       line 253, "pan.___", state 1052, "(1)"
+       line 257, "pan.___", state 1064, "(1)"
+       line 261, "pan.___", state 1072, "(1)"
+       line 272, "pan.___", state 1103, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1112, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1125, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1134, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1150, "(1)"
+       line 253, "pan.___", state 1158, "(1)"
+       line 257, "pan.___", state 1170, "(1)"
+       line 261, "pan.___", state 1178, "(1)"
+       line 272, "pan.___", state 1195, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1197, "(1)"
+       line 276, "pan.___", state 1204, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1206, "(1)"
+       line 276, "pan.___", state 1207, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1207, "else"
+       line 274, "pan.___", state 1212, "((i<1))"
+       line 274, "pan.___", state 1212, "((i>=1))"
+       line 280, "pan.___", state 1217, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1219, "(1)"
+       line 280, "pan.___", state 1220, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1220, "else"
+       line 284, "pan.___", state 1226, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1228, "(1)"
+       line 284, "pan.___", state 1229, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1229, "else"
+       line 282, "pan.___", state 1234, "((i<2))"
+       line 282, "pan.___", state 1234, "((i>=2))"
+       line 249, "pan.___", state 1242, "(1)"
+       line 253, "pan.___", state 1250, "(1)"
+       line 253, "pan.___", state 1251, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1251, "else"
+       line 251, "pan.___", state 1256, "((i<1))"
+       line 251, "pan.___", state 1256, "((i>=1))"
+       line 257, "pan.___", state 1262, "(1)"
+       line 257, "pan.___", state 1263, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1263, "else"
+       line 261, "pan.___", state 1270, "(1)"
+       line 261, "pan.___", state 1271, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1271, "else"
+       line 266, "pan.___", state 1280, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1280, "else"
+       line 1229, "pan.___", state 1283, "i = 0"
+       line 1229, "pan.___", state 1285, "reader_barrier = 1"
+       line 1229, "pan.___", state 1296, "((i<1))"
+       line 1229, "pan.___", state 1296, "((i>=1))"
+       line 272, "pan.___", state 1301, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1303, "(1)"
+       line 276, "pan.___", state 1310, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1312, "(1)"
+       line 276, "pan.___", state 1313, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1313, "else"
+       line 274, "pan.___", state 1318, "((i<1))"
+       line 274, "pan.___", state 1318, "((i>=1))"
+       line 280, "pan.___", state 1323, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1325, "(1)"
+       line 280, "pan.___", state 1326, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1326, "else"
+       line 284, "pan.___", state 1332, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1334, "(1)"
+       line 284, "pan.___", state 1335, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1335, "else"
+       line 282, "pan.___", state 1340, "((i<2))"
+       line 282, "pan.___", state 1340, "((i>=2))"
+       line 249, "pan.___", state 1348, "(1)"
+       line 253, "pan.___", state 1356, "(1)"
+       line 253, "pan.___", state 1357, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1357, "else"
+       line 251, "pan.___", state 1362, "((i<1))"
+       line 251, "pan.___", state 1362, "((i>=1))"
+       line 257, "pan.___", state 1368, "(1)"
+       line 257, "pan.___", state 1369, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1369, "else"
+       line 261, "pan.___", state 1376, "(1)"
+       line 261, "pan.___", state 1377, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1377, "else"
+       line 266, "pan.___", state 1386, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1386, "else"
+       line 299, "pan.___", state 1388, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1388, "else"
+       line 1229, "pan.___", state 1389, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 1229, "pan.___", state 1389, "else"
+       line 276, "pan.___", state 1402, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1415, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1424, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1440, "(1)"
+       line 253, "pan.___", state 1448, "(1)"
+       line 257, "pan.___", state 1460, "(1)"
+       line 261, "pan.___", state 1468, "(1)"
+       line 272, "pan.___", state 1499, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1508, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1521, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1530, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1546, "(1)"
+       line 253, "pan.___", state 1554, "(1)"
+       line 257, "pan.___", state 1566, "(1)"
+       line 261, "pan.___", state 1574, "(1)"
+       line 1237, "pan.___", state 1590, "-end-"
+       (195 of 1590 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 982 seconds
+pan: rate  38391.44 states/second
+pan: avg transition delay 2.6765e-06 usec
+cp .input.spin urcu_free_single_flip.spin.input
+cp .input.spin.trail urcu_free_single_flip.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.spin.input b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.spin.input
new file mode 100644 (file)
index 0000000..6e6560d
--- /dev/null
@@ -0,0 +1,1273 @@
+#define SINGLE_FLIP
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
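+/*
+ * Editor's note: an illustrative sketch of how the token macros above gate an
+ * instruction (hypothetical token word "proc_tokens" and one-hot masks
+ * READ_X_DONE / WRITE_Y_DONE, not part of the verified model):
+ *
+ *   :: CONSUME_TOKENS(proc_tokens, READ_X_DONE, WRITE_Y_DONE) ->
+ *          // runs only once the read of x has produced its token and the
+ *          // write of y has not executed yet
+ *          PRODUCE_TOKENS(proc_tokens, WRITE_Y_DONE);
+ *
+ * CLEAR_TOKENS(proc_tokens, ...) resets the chosen bits, e.g. at the end of a
+ * loop iteration.
+ */
+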
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
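+/*
+ * Editor's note: a minimal illustration of the dependency kinds above, using
+ * hypothetical variables a, b and c (not part of the verified model):
+ *
+ *   RAW:  a = b;   then   c = a;   (the second statement reads what the first wrote)
+ *   WAR:  c = a;   then   a = b;   (a is overwritten after having been read)
+ *   WAW:  a = b;   then   a = c;   (two successive writes to the same variable)
+ *
+ * WAR and WAW matter here because, in the OOO memory model, cached variables
+ * cannot always be renamed away.
+ */
+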
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
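+/*
+ * Editor's note: illustrative usage of the cache model above, with a
+ * hypothetical cached variable "foo" (not part of the verified model):
+ *
+ *   DECLARE_CACHED_VAR(byte, foo);
+ *
+ *   WRITE_CACHED_VAR(foo, 1);               // update this process' cache copy
+ *   CACHE_WRITE_TO_MEM(foo, get_pid());     // flush it to memory (as smp_wmb does)
+ *
+ *   CACHE_READ_FROM_MEM(foo, get_pid());    // refresh the copy from memory
+ *   tmp = READ_CACHED_VAR(foo);             // read the (possibly stale) copy
+ *
+ * The RANDOM_CACHE_* variants model that, without a barrier, such flushes and
+ * refreshes may happen at any point, or not at all.
+ */
+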
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Instead, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
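+
+/*
+ * Illustrative sketch (not part of the model), assuming a single reader and
+ * REMOTE_BARRIERS: a writer-side smp_mb_send() pairs with the reader-side
+ * smp_mb_recv() roughly as follows:
+ *
+ *   writer: smp_mb(i);
+ *           reader_barrier[0] = 1;
+ *           busy-wait until reader_barrier[0] == 0 (progress label only);
+ *           smp_mb(i);
+ *   reader: either (reader_barrier[0] == 1) -> smp_mb(i); reader_barrier[0] = 0;
+ *           or nondeterministically break out without servicing the request.
+ *
+ * Because the reader may always take the "break" branch, the writer's
+ * busy-wait carries a progress label rather than relying on the reader to
+ * cooperate.
+ */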
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
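+/*
+ * ooo_mem() models out-of-order memory effects between instructions: it
+ * nondeterministically flushes each modeled variable's cache line back to
+ * memory and, when HAVE_OOO_CACHE_READ is defined, also nondeterministically
+ * refreshes the cached copies from memory; otherwise it falls back to
+ * smp_rmb().
+ */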
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
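+
+/*
+ * Sketch of the token-based dataflow encoding used below, assuming the
+ * CONSUME_TOKENS()/PRODUCE_TOKENS()/CLEAR_TOKENS() helpers defined earlier in
+ * this model behave as their names suggest (CONSUME_TOKENS(proc, deps, out)
+ * is enabled when all "deps" bits are set in proc and no "out" bit is set
+ * yet): each instruction becomes one guarded alternative of an if/fi inside
+ * a do loop, e.g.
+ *
+ *   :: CONSUME_TOKENS(proc_urcu_reader, dependencies, produced) ->
+ *           body of the instruction;
+ *           PRODUCE_TOKENS(proc_urcu_reader, produced);
+ *
+ * Alternatives whose dependency sets do not order them may therefore fire in
+ * either order, which is how weakly-ordered instruction scheduling is
+ * modeled.
+ */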
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
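+
+/*
+ * Rough program order of the reader dataflow encoded below: outer read lock
+ * -> first mb -> nested read lock -> rcu_ptr read (READ_GEN) -> dependent
+ * rcu_data read (ACCESS_GEN) -> nested read unlock -> second mb -> outer
+ * read unlock, followed by the same sequence unrolled a second time (third
+ * and fourth mb).  With NO_MB or REMOTE_BARRIERS, the mb tokens are
+ * pre-produced in urcu_one_read() so the barriers impose no ordering.
+ */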
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier is only serviced at points
+                * where its execution appears consistent with program order;
+                * the disjunction below enumerates every program-order prefix
+                * of the reader's instructions at which it may be received.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps
+                        * the barrier because the performance impact of adding a branch to
+                        * skip it in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution into the next.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
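+
+/*
+ * Rough program order of the writer dataflow encoded below: write new data
+ * -> wmb -> xchg of rcu_ptr -> first mb -> first GP flip and wait -> second
+ * GP flip and wait -> second mb -> poison (free) the old data.  With NO_WMB,
+ * NO_MB or SINGLE_FLIP, the corresponding tokens are pre-produced so those
+ * steps are skipped.
+ */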
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.spin.input.trail b/formal-model/urcu-controldataflow-intel-ipi/urcu_free_single_flip.spin.input.trail
new file mode 100644 (file)
index 0000000..05857aa
--- /dev/null
@@ -0,0 +1,1301 @@
+-2:3:-2
+-4:-4:-4
+1:0:4179
+2:3:4099
+3:3:4102
+4:3:4102
+5:3:4105
+6:3:4113
+7:3:4113
+8:3:4116
+9:3:4122
+10:3:4126
+11:3:4126
+12:3:4129
+13:3:4139
+14:3:4147
+15:3:4147
+16:3:4150
+17:3:4156
+18:3:4160
+19:3:4160
+20:3:4163
+21:3:4169
+22:3:4173
+23:3:4174
+24:0:4179
+25:3:4176
+26:0:4179
+27:2:2511
+28:0:4179
+29:2:2517
+30:0:4179
+31:2:2518
+32:0:4179
+33:2:2520
+34:0:4179
+35:2:2521
+36:0:4179
+37:2:2522
+38:0:4179
+39:2:2523
+40:0:4179
+41:2:2524
+42:0:4179
+43:2:2525
+44:0:4179
+45:2:2526
+46:2:2527
+47:2:2531
+48:2:2532
+49:2:2540
+50:2:2541
+51:2:2545
+52:2:2546
+53:2:2554
+54:2:2559
+55:2:2563
+56:2:2564
+57:2:2572
+58:2:2573
+59:2:2577
+60:2:2578
+61:2:2572
+62:2:2573
+63:2:2577
+64:2:2578
+65:2:2586
+66:2:2591
+67:2:2598
+68:2:2599
+69:2:2606
+70:2:2611
+71:2:2618
+72:2:2619
+73:2:2618
+74:2:2619
+75:2:2626
+76:2:2636
+77:0:4179
+78:2:2525
+79:0:4179
+80:2:2640
+81:2:2644
+82:2:2645
+83:2:2649
+84:2:2653
+85:2:2654
+86:2:2658
+87:2:2666
+88:2:2667
+89:2:2671
+90:2:2675
+91:2:2676
+92:2:2671
+93:2:2672
+94:2:2680
+95:0:4179
+96:2:2525
+97:0:4179
+98:2:2688
+99:2:2689
+100:2:2690
+101:0:4179
+102:2:2525
+103:0:4179
+104:2:2695
+105:0:4179
+106:2:3308
+107:2:3309
+108:2:3313
+109:2:3317
+110:2:3318
+111:2:3322
+112:2:3327
+113:2:3335
+114:2:3339
+115:2:3340
+116:2:3335
+117:2:3339
+118:2:3340
+119:2:3344
+120:2:3351
+121:2:3358
+122:2:3359
+123:2:3366
+124:2:3371
+125:2:3378
+126:2:3379
+127:2:3378
+128:2:3379
+129:2:3386
+130:2:3390
+131:0:4179
+132:2:3395
+133:0:4179
+134:2:3396
+135:0:4179
+136:2:3397
+137:0:4179
+138:2:3398
+139:0:4179
+140:1:2
+141:0:4179
+142:2:3399
+143:0:4179
+144:1:8
+145:0:4179
+146:1:9
+147:0:4179
+148:2:3398
+149:0:4179
+150:1:10
+151:0:4179
+152:2:3399
+153:0:4179
+154:1:11
+155:0:4179
+156:2:3398
+157:0:4179
+158:1:12
+159:0:4179
+160:2:3399
+161:0:4179
+162:1:13
+163:0:4179
+164:2:3398
+165:0:4179
+166:1:14
+167:0:4179
+168:2:3399
+169:0:4179
+170:1:15
+171:0:4179
+172:1:16
+173:0:4179
+174:2:3398
+175:0:4179
+176:1:17
+177:0:4179
+178:2:3399
+179:0:4179
+180:1:26
+181:0:4179
+182:2:3398
+183:0:4179
+184:1:30
+185:1:31
+186:1:35
+187:1:39
+188:1:40
+189:1:44
+190:1:52
+191:1:53
+192:1:57
+193:1:61
+194:1:62
+195:1:57
+196:1:61
+197:1:62
+198:1:66
+199:1:73
+200:1:80
+201:1:81
+202:1:88
+203:1:93
+204:1:100
+205:1:101
+206:1:100
+207:1:101
+208:1:108
+209:1:112
+210:0:4179
+211:2:3399
+212:0:4179
+213:1:117
+214:0:4179
+215:2:3400
+216:0:4179
+217:2:3405
+218:0:4179
+219:2:3406
+220:0:4179
+221:2:3414
+222:2:3415
+223:2:3419
+224:2:3423
+225:2:3424
+226:2:3428
+227:2:3436
+228:2:3437
+229:2:3441
+230:2:3445
+231:2:3446
+232:2:3441
+233:2:3445
+234:2:3446
+235:2:3450
+236:2:3457
+237:2:3464
+238:2:3465
+239:2:3472
+240:2:3477
+241:2:3484
+242:2:3485
+243:2:3484
+244:2:3485
+245:2:3492
+246:2:3496
+247:0:4179
+248:2:2697
+249:2:3289
+250:0:4179
+251:2:2525
+252:0:4179
+253:2:2698
+254:0:4179
+255:2:2525
+256:0:4179
+257:2:2701
+258:2:2702
+259:2:2706
+260:2:2707
+261:2:2715
+262:2:2716
+263:2:2720
+264:2:2721
+265:2:2729
+266:2:2734
+267:2:2738
+268:2:2739
+269:2:2747
+270:2:2748
+271:2:2752
+272:2:2753
+273:2:2747
+274:2:2748
+275:2:2752
+276:2:2753
+277:2:2761
+278:2:2766
+279:2:2773
+280:2:2774
+281:2:2781
+282:2:2786
+283:2:2793
+284:2:2794
+285:2:2793
+286:2:2794
+287:2:2801
+288:2:2810
+289:0:4179
+290:2:2525
+291:0:4179
+292:2:2814
+293:2:2823
+294:2:2824
+295:2:2828
+296:2:2829
+297:2:2833
+298:2:2834
+299:2:2842
+300:2:2847
+301:2:2851
+302:2:2852
+303:2:2860
+304:2:2861
+305:2:2865
+306:2:2866
+307:2:2860
+308:2:2861
+309:2:2865
+310:2:2866
+311:2:2874
+312:2:2881
+313:2:2882
+314:2:2886
+315:2:2887
+316:2:2894
+317:2:2899
+318:2:2906
+319:2:2907
+320:2:2906
+321:2:2907
+322:2:2914
+323:2:2926
+324:2:2927
+325:0:4179
+326:2:2525
+327:0:4179
+328:2:3277
+329:0:4179
+330:1:118
+331:0:4179
+332:1:120
+333:0:4179
+334:1:19
+335:0:4179
+336:1:126
+337:1:127
+338:1:131
+339:1:132
+340:1:140
+341:1:141
+342:1:145
+343:1:146
+344:1:154
+345:1:159
+346:1:163
+347:1:164
+348:1:172
+349:1:173
+350:1:177
+351:1:178
+352:1:172
+353:1:173
+354:1:177
+355:1:178
+356:1:186
+357:1:191
+358:1:198
+359:1:199
+360:1:206
+361:1:211
+362:1:218
+363:1:219
+364:1:218
+365:1:219
+366:1:226
+367:0:4179
+368:1:15
+369:0:4179
+370:1:16
+371:0:4179
+372:1:17
+373:0:4179
+374:1:118
+375:0:4179
+376:1:120
+377:0:4179
+378:1:19
+379:0:4179
+380:1:237
+381:1:238
+382:0:4179
+383:1:15
+384:0:4179
+385:1:16
+386:0:4179
+387:1:17
+388:0:4179
+389:1:118
+390:0:4179
+391:1:120
+392:0:4179
+393:1:19
+394:0:4179
+395:1:244
+396:1:245
+397:1:249
+398:1:250
+399:1:258
+400:1:259
+401:1:263
+402:1:264
+403:1:272
+404:1:277
+405:1:281
+406:1:282
+407:1:290
+408:1:291
+409:1:295
+410:1:296
+411:1:290
+412:1:291
+413:1:295
+414:1:296
+415:1:304
+416:1:309
+417:1:316
+418:1:317
+419:1:324
+420:1:329
+421:1:336
+422:1:337
+423:1:336
+424:1:337
+425:1:344
+426:0:4179
+427:1:15
+428:0:4179
+429:1:16
+430:0:4179
+431:2:3899
+432:2:3907
+433:2:3911
+434:2:3912
+435:2:3916
+436:2:3924
+437:2:3925
+438:2:3929
+439:2:3933
+440:2:3934
+441:2:3929
+442:2:3933
+443:2:3934
+444:2:3938
+445:2:3945
+446:2:3952
+447:2:3953
+448:2:3960
+449:2:3965
+450:2:3972
+451:2:3973
+452:2:3972
+453:2:3973
+454:2:3980
+455:2:3984
+456:0:4179
+457:2:3989
+458:0:4179
+459:2:3990
+460:0:4179
+461:2:3991
+462:0:4179
+463:2:3992
+464:0:4179
+465:1:17
+466:0:4179
+467:2:3993
+468:0:4179
+469:1:26
+470:0:4179
+471:2:3992
+472:0:4179
+473:1:30
+474:1:31
+475:1:35
+476:1:39
+477:1:40
+478:1:44
+479:1:52
+480:1:53
+481:1:57
+482:1:61
+483:1:62
+484:1:57
+485:1:61
+486:1:62
+487:1:66
+488:1:73
+489:1:80
+490:1:81
+491:1:88
+492:1:93
+493:1:100
+494:1:101
+495:1:100
+496:1:101
+497:1:108
+498:1:112
+499:0:4179
+500:2:3993
+501:0:4179
+502:1:117
+503:0:4179
+504:2:3994
+505:0:4179
+506:2:3999
+507:0:4179
+508:2:4000
+509:0:4179
+510:2:4008
+511:2:4009
+512:2:4013
+513:2:4017
+514:2:4018
+515:2:4022
+516:2:4030
+517:2:4031
+518:2:4035
+519:2:4039
+520:2:4040
+521:2:4035
+522:2:4039
+523:2:4040
+524:2:4044
+525:2:4051
+526:2:4058
+527:2:4059
+528:2:4066
+529:2:4071
+530:2:4078
+531:2:4079
+532:2:4078
+533:2:4079
+534:2:4086
+535:2:4090
+536:0:4179
+537:2:3279
+538:2:3289
+539:0:4179
+540:2:2525
+541:0:4179
+542:2:3280
+543:2:3281
+544:0:4179
+545:2:2525
+546:0:4179
+547:2:3285
+548:0:4179
+549:2:3293
+550:0:4179
+551:2:2518
+552:0:4179
+553:2:2520
+554:0:4179
+555:2:2521
+556:0:4179
+557:2:2522
+558:0:4179
+559:2:2523
+560:0:4179
+561:2:2524
+562:0:4179
+563:2:2525
+564:0:4179
+565:2:2526
+566:2:2527
+567:2:2531
+568:2:2532
+569:2:2540
+570:2:2541
+571:2:2545
+572:2:2546
+573:2:2554
+574:2:2559
+575:2:2563
+576:2:2564
+577:2:2572
+578:2:2573
+579:2:2574
+580:2:2572
+581:2:2573
+582:2:2577
+583:2:2578
+584:2:2586
+585:2:2591
+586:2:2598
+587:2:2599
+588:2:2606
+589:2:2611
+590:2:2618
+591:2:2619
+592:2:2618
+593:2:2619
+594:2:2626
+595:2:2636
+596:0:4179
+597:2:2525
+598:0:4179
+599:2:2640
+600:2:2644
+601:2:2645
+602:2:2649
+603:2:2653
+604:2:2654
+605:2:2658
+606:2:2666
+607:2:2667
+608:2:2671
+609:2:2672
+610:2:2671
+611:2:2675
+612:2:2676
+613:2:2680
+614:0:4179
+615:2:2525
+616:0:4179
+617:2:2688
+618:2:2689
+619:2:2690
+620:0:4179
+621:2:2525
+622:0:4179
+623:2:2695
+624:0:4179
+625:1:118
+626:0:4179
+627:1:120
+628:0:4179
+629:1:19
+630:0:4179
+631:1:355
+632:1:356
+633:1:360
+634:1:361
+635:1:369
+636:1:370
+637:1:374
+638:1:375
+639:1:383
+640:1:388
+641:1:392
+642:1:393
+643:1:401
+644:1:402
+645:1:406
+646:1:407
+647:1:401
+648:1:402
+649:1:406
+650:1:407
+651:1:415
+652:1:420
+653:1:427
+654:1:428
+655:1:435
+656:1:440
+657:1:447
+658:1:448
+659:1:447
+660:1:448
+661:1:455
+662:1:464
+663:0:4179
+664:1:15
+665:0:4179
+666:1:16
+667:0:4179
+668:1:17
+669:0:4179
+670:1:118
+671:0:4179
+672:1:120
+673:0:4179
+674:1:19
+675:0:4179
+676:1:584
+677:1:585
+678:1:589
+679:1:590
+680:1:598
+681:1:599
+682:1:600
+683:1:612
+684:1:617
+685:1:621
+686:1:622
+687:1:630
+688:1:631
+689:1:635
+690:1:636
+691:1:630
+692:1:631
+693:1:635
+694:1:636
+695:1:644
+696:1:649
+697:1:656
+698:1:657
+699:1:664
+700:1:669
+701:1:676
+702:1:677
+703:1:676
+704:1:677
+705:1:684
+706:0:4179
+707:1:15
+708:0:4179
+709:1:16
+710:0:4179
+711:1:17
+712:0:4179
+713:1:118
+714:0:4179
+715:1:120
+716:0:4179
+717:1:19
+718:0:4179
+719:1:695
+720:1:698
+721:1:699
+722:0:4179
+723:1:15
+724:0:4179
+725:1:16
+726:0:4179
+727:1:17
+728:0:4179
+729:1:118
+730:0:4179
+731:1:120
+732:0:4179
+733:1:19
+734:0:4179
+735:1:702
+736:1:703
+737:1:707
+738:1:708
+739:1:716
+740:1:717
+741:1:721
+742:1:722
+743:1:730
+744:1:735
+745:1:739
+746:1:740
+747:1:748
+748:1:749
+749:1:753
+750:1:754
+751:1:748
+752:1:749
+753:1:753
+754:1:754
+755:1:762
+756:1:767
+757:1:774
+758:1:775
+759:1:782
+760:1:787
+761:1:794
+762:1:795
+763:1:794
+764:1:795
+765:1:802
+766:0:4179
+767:1:15
+768:0:4179
+769:1:16
+770:0:4179
+771:1:17
+772:0:4179
+773:1:118
+774:0:4179
+775:1:120
+776:0:4179
+777:1:19
+778:0:4179
+779:1:926
+780:1:927
+781:1:931
+782:1:932
+783:1:940
+784:1:941
+785:1:945
+786:1:946
+787:1:954
+788:1:959
+789:1:963
+790:1:964
+791:1:972
+792:1:973
+793:1:977
+794:1:978
+795:1:972
+796:1:973
+797:1:977
+798:1:978
+799:1:986
+800:1:991
+801:1:998
+802:1:999
+803:1:1006
+804:1:1011
+805:1:1018
+806:1:1019
+807:1:1018
+808:1:1019
+809:1:1026
+810:1:1035
+811:1:1039
+812:0:4179
+813:1:15
+814:0:4179
+815:1:16
+816:0:4179
+817:1:17
+818:0:4179
+819:1:118
+820:0:4179
+821:1:120
+822:0:4179
+823:1:19
+824:0:4179
+825:1:1040
+826:1:1041
+827:1:1045
+828:1:1046
+829:1:1054
+830:1:1055
+831:1:1056
+832:1:1068
+833:1:1073
+834:1:1077
+835:1:1078
+836:1:1086
+837:1:1087
+838:1:1091
+839:1:1092
+840:1:1086
+841:1:1087
+842:1:1091
+843:1:1092
+844:1:1100
+845:1:1105
+846:1:1112
+847:1:1113
+848:1:1120
+849:1:1125
+850:1:1132
+851:1:1133
+852:1:1132
+853:1:1133
+854:1:1140
+855:0:4179
+856:1:15
+857:0:4179
+858:1:16
+859:0:4179
+860:2:3308
+861:2:3309
+862:2:3313
+863:2:3317
+864:2:3318
+865:2:3322
+866:2:3327
+867:2:3335
+868:2:3339
+869:2:3340
+870:2:3335
+871:2:3339
+872:2:3340
+873:2:3344
+874:2:3351
+875:2:3358
+876:2:3359
+877:2:3366
+878:2:3371
+879:2:3378
+880:2:3379
+881:2:3378
+882:2:3379
+883:2:3386
+884:2:3390
+885:0:4179
+886:2:3395
+887:0:4179
+888:2:3396
+889:0:4179
+890:2:3397
+891:0:4179
+892:2:3398
+893:0:4179
+894:1:17
+895:0:4179
+896:2:3399
+897:0:4179
+898:1:26
+899:0:4179
+900:2:3398
+901:0:4179
+902:1:30
+903:1:31
+904:1:35
+905:1:39
+906:1:40
+907:1:44
+908:1:52
+909:1:53
+910:1:57
+911:1:61
+912:1:62
+913:1:57
+914:1:61
+915:1:62
+916:1:66
+917:1:73
+918:1:80
+919:1:81
+920:1:88
+921:1:93
+922:1:100
+923:1:101
+924:1:100
+925:1:101
+926:1:108
+927:1:112
+928:0:4179
+929:2:3399
+930:0:4179
+931:1:117
+932:0:4179
+933:2:3400
+934:0:4179
+935:2:3405
+936:0:4179
+937:2:3406
+938:0:4179
+939:2:3414
+940:2:3415
+941:2:3419
+942:2:3423
+943:2:3424
+944:2:3428
+945:2:3436
+946:2:3437
+947:2:3441
+948:2:3445
+949:2:3446
+950:2:3441
+951:2:3445
+952:2:3446
+953:2:3450
+954:2:3457
+955:2:3464
+956:2:3465
+957:2:3472
+958:2:3477
+959:2:3484
+960:2:3485
+961:2:3484
+962:2:3485
+963:2:3492
+964:2:3496
+965:0:4179
+966:2:2697
+967:2:3289
+968:0:4179
+969:2:2525
+970:0:4179
+971:2:2698
+972:0:4179
+973:2:2525
+974:0:4179
+975:2:2701
+976:2:2702
+977:2:2706
+978:2:2707
+979:2:2715
+980:2:2716
+981:2:2720
+982:2:2721
+983:2:2729
+984:2:2734
+985:2:2738
+986:2:2739
+987:2:2747
+988:2:2748
+989:2:2752
+990:2:2753
+991:2:2747
+992:2:2748
+993:2:2752
+994:2:2753
+995:2:2761
+996:2:2766
+997:2:2773
+998:2:2774
+999:2:2781
+1000:2:2786
+1001:2:2793
+1002:2:2794
+1003:2:2793
+1004:2:2794
+1005:2:2801
+1006:2:2810
+1007:0:4179
+1008:2:2525
+1009:0:4179
+1010:2:2814
+1011:2:2815
+1012:2:2816
+1013:2:2828
+1014:2:2829
+1015:2:2833
+1016:2:2834
+1017:2:2842
+1018:2:2847
+1019:2:2851
+1020:2:2852
+1021:2:2860
+1022:2:2861
+1023:2:2865
+1024:2:2866
+1025:2:2860
+1026:2:2861
+1027:2:2865
+1028:2:2866
+1029:2:2874
+1030:2:2879
+1031:2:2886
+1032:2:2887
+1033:2:2894
+1034:2:2899
+1035:2:2906
+1036:2:2907
+1037:2:2906
+1038:2:2907
+1039:2:2914
+1040:2:2926
+1041:2:2927
+1042:0:4179
+1043:2:2525
+1044:0:4179
+1045:2:3277
+1046:0:4179
+1047:2:3902
+1048:2:3903
+1049:2:3907
+1050:2:3911
+1051:2:3912
+1052:2:3916
+1053:2:3924
+1054:2:3925
+1055:2:3929
+1056:2:3933
+1057:2:3934
+1058:2:3929
+1059:2:3933
+1060:2:3934
+1061:2:3938
+1062:2:3945
+1063:2:3952
+1064:2:3953
+1065:2:3960
+1066:2:3965
+1067:2:3972
+1068:2:3973
+1069:2:3972
+1070:2:3973
+1071:2:3980
+1072:2:3984
+1073:0:4179
+1074:2:3989
+1075:0:4179
+1076:2:3990
+1077:0:4179
+1078:2:3991
+1079:0:4179
+1080:2:3992
+1081:0:4179
+1082:1:26
+1083:0:4179
+1084:2:3993
+1085:0:4179
+1086:1:30
+1087:1:31
+1088:1:35
+1089:1:39
+1090:1:40
+1091:1:44
+1092:1:52
+1093:1:53
+1094:1:57
+1095:1:61
+1096:1:62
+1097:1:57
+1098:1:61
+1099:1:62
+1100:1:66
+1101:1:73
+1102:1:80
+1103:1:81
+1104:1:88
+1105:1:93
+1106:1:100
+1107:1:101
+1108:1:100
+1109:1:101
+1110:1:108
+1111:1:112
+1112:0:4179
+1113:2:3992
+1114:0:4179
+1115:1:117
+1116:0:4179
+1117:2:3993
+1118:0:4179
+1119:2:3994
+1120:0:4179
+1121:2:3999
+1122:0:4179
+1123:2:4000
+1124:0:4179
+1125:2:4008
+1126:2:4009
+1127:2:4013
+1128:2:4017
+1129:2:4018
+1130:2:4022
+1131:2:4030
+1132:2:4031
+1133:2:4035
+1134:2:4039
+1135:2:4040
+1136:2:4035
+1137:2:4039
+1138:2:4040
+1139:2:4044
+1140:2:4051
+1141:2:4058
+1142:2:4059
+1143:2:4066
+1144:2:4071
+1145:2:4078
+1146:2:4079
+1147:2:4078
+1148:2:4079
+1149:2:4086
+1150:2:4090
+1151:0:4179
+1152:2:3279
+1153:2:3289
+1154:0:4179
+1155:2:2525
+1156:0:4179
+1157:2:3280
+1158:2:3281
+1159:0:4179
+1160:2:2525
+1161:0:4179
+1162:2:3285
+1163:0:4179
+1164:2:3293
+1165:0:4179
+1166:2:2518
+1167:0:4179
+1168:2:2520
+1169:0:4179
+1170:2:2521
+1171:0:4179
+1172:2:2522
+1173:0:4179
+1174:2:2523
+1175:0:4179
+1176:2:2524
+1177:0:4179
+1178:2:2525
+1179:0:4179
+1180:2:2526
+1181:2:2527
+1182:2:2531
+1183:2:2532
+1184:2:2540
+1185:2:2541
+1186:2:2545
+1187:2:2546
+1188:2:2554
+1189:2:2559
+1190:2:2563
+1191:2:2564
+1192:2:2572
+1193:2:2573
+1194:2:2577
+1195:2:2578
+1196:2:2572
+1197:2:2573
+1198:2:2574
+1199:2:2586
+1200:2:2591
+1201:2:2598
+1202:2:2599
+1203:2:2606
+1204:2:2611
+1205:2:2618
+1206:2:2619
+1207:2:2618
+1208:2:2619
+1209:2:2626
+1210:2:2636
+1211:0:4179
+1212:2:2525
+1213:0:4179
+1214:1:118
+1215:0:4179
+1216:1:120
+1217:0:4179
+1218:1:19
+1219:0:4179
+1220:1:1151
+1221:0:4179
+1222:1:2417
+1223:1:2424
+1224:1:2425
+1225:1:2432
+1226:1:2437
+1227:1:2444
+1228:1:2445
+1229:1:2444
+1230:1:2445
+1231:1:2452
+1232:1:2456
+1233:0:4179
+1234:2:2640
+1235:2:2644
+1236:2:2645
+1237:2:2649
+1238:2:2653
+1239:2:2654
+1240:2:2658
+1241:2:2666
+1242:2:2667
+1243:2:2671
+1244:2:2675
+1245:2:2676
+1246:2:2671
+1247:2:2672
+1248:2:2680
+1249:0:4179
+1250:2:2525
+1251:0:4179
+1252:2:2688
+1253:2:2689
+1254:2:2690
+1255:0:4179
+1256:2:2525
+1257:0:4179
+1258:2:2695
+1259:0:4179
+1260:2:3308
+1261:2:3309
+1262:2:3313
+1263:2:3317
+1264:2:3318
+1265:2:3322
+1266:2:3327
+1267:2:3335
+1268:2:3339
+1269:2:3340
+1270:2:3335
+1271:2:3339
+1272:2:3340
+1273:2:3344
+1274:2:3351
+1275:2:3358
+1276:2:3359
+1277:2:3366
+1278:2:3371
+1279:2:3378
+1280:2:3379
+1281:2:3378
+1282:2:3379
+1283:2:3386
+1284:2:3390
+1285:0:4179
+1286:2:3395
+1287:0:4179
+1288:2:3396
+1289:0:4179
+1290:2:3397
+1291:0:4179
+1292:2:3398
+1293:0:4179
+1294:1:1153
+1295:1:1154
+1296:0:4177
+1297:2:3399
+1298:0:4183
+1299:1:2145
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_progress.ltl b/formal-model/urcu-controldataflow-intel-ipi/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_reader.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer.log b/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..e238402
--- /dev/null
@@ -0,0 +1,256 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+depth 23: Claim reached state 9 (line 1300)
+depth 51: Claim reached state 9 (line 1299)
+Depth=    7071 States=    1e+06 Transitions= 1.16e+07 Memory=   514.397        t=   30.7 R=   3e+04
+Depth=    7071 States=    2e+06 Transitions= 2.31e+07 Memory=   566.252        t=   61.5 R=   3e+04
+Depth=    7071 States=    3e+06 Transitions= 3.43e+07 Memory=   617.326        t=   91.6 R=   3e+04
+pan: resizing hashtable to -w22..  done
+Depth=    8815 States=    4e+06 Transitions= 4.57e+07 Memory=   696.592        t=    122 R=   3e+04
+Depth=    8815 States=    5e+06 Transitions= 5.73e+07 Memory=   743.955        t=    153 R=   3e+04
+Depth=    8815 States=    6e+06 Transitions= 7.04e+07 Memory=   797.373        t=    190 R=   3e+04
+Depth=    8815 States=    7e+06 Transitions= 8.38e+07 Memory=   847.861        t=    226 R=   3e+04
+Depth=    8815 States=    8e+06 Transitions= 9.59e+07 Memory=   898.057        t=    259 R=   3e+04
+Depth=    8815 States=    9e+06 Transitions= 1.08e+08 Memory=   948.350        t=    293 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    8815 States=    1e+07 Transitions= 1.29e+08 Memory=  1123.615        t=    352 R=   3e+04
+Depth=    8815 States=  1.1e+07 Transitions= 1.41e+08 Memory=  1175.764        t=    385 R=   3e+04
+Depth=    8815 States=  1.2e+07 Transitions= 1.53e+08 Memory=  1225.471        t=    417 R=   3e+04
+Depth=    8815 States=  1.3e+07 Transitions= 1.64e+08 Memory=  1276.057        t=    447 R=   3e+04
+Depth=    8815 States=  1.4e+07 Transitions= 1.75e+08 Memory=  1330.451        t=    477 R=   3e+04
+Depth=    8815 States=  1.5e+07 Transitions= 1.87e+08 Memory=  1379.670        t=    508 R=   3e+04
+Depth=    8815 States=  1.6e+07 Transitions= 1.98e+08 Memory=  1429.865        t=    538 R=   3e+04
+Depth=    8815 States=  1.7e+07 Transitions= 2.09e+08 Memory=  1484.748        t=    568 R=   3e+04
+Depth=    8815 States=  1.8e+07 Transitions= 2.23e+08 Memory=  1529.670        t=    606 R=   3e+04
+Depth=    8815 States=  1.9e+07 Transitions= 2.36e+08 Memory=  1579.377        t=    643 R=   3e+04
+Depth=    8815 States=    2e+07 Transitions= 2.57e+08 Memory=  1629.279        t=    701 R=   3e+04
+Depth=    8815 States=  2.1e+07 Transitions= 2.93e+08 Memory=  1686.115        t=    807 R=   3e+04
+Depth=    9016 States=  2.2e+07 Transitions= 3.11e+08 Memory=  1741.193        t=    858 R=   3e+04
+Depth=    9016 States=  2.3e+07 Transitions= 3.25e+08 Memory=  1793.733        t=    897 R=   3e+04
+Depth=    9016 States=  2.4e+07 Transitions= 3.45e+08 Memory=  1846.272        t=    954 R=   3e+04
+Depth=    9016 States=  2.5e+07 Transitions= 3.67e+08 Memory=  1897.639        t= 1.02e+03 R=   2e+04
+Depth=    9016 States=  2.6e+07 Transitions= 3.84e+08 Memory=  1951.447        t= 1.06e+03 R=   2e+04
+Depth=    9016 States=  2.7e+07 Transitions= 4.07e+08 Memory=  2004.670        t= 1.13e+03 R=   2e+04
+Depth=    9016 States=  2.8e+07 Transitions= 4.29e+08 Memory=  2050.764        t= 1.19e+03 R=   2e+04
+Depth=    9016 States=  2.9e+07 Transitions= 4.48e+08 Memory=  2100.373        t= 1.25e+03 R=   2e+04
+Depth=    9016 States=    3e+07 Transitions= 4.66e+08 Memory=  2151.545        t= 1.3e+03 R=   2e+04
+Depth=    9016 States=  3.1e+07 Transitions= 4.86e+08 Memory=  2201.838        t= 1.36e+03 R=   2e+04
+Depth=    9016 States=  3.2e+07 Transitions=  5.1e+08 Memory=  2250.861        t= 1.43e+03 R=   2e+04
+Depth=    9016 States=  3.3e+07 Transitions= 5.29e+08 Memory=  2299.983        t= 1.48e+03 R=   2e+04
+Depth=    9016 States=  3.4e+07 Transitions= 5.47e+08 Memory=  2351.057        t= 1.53e+03 R=   2e+04
+pan: resizing hashtable to -w26..  done
+Depth=    9016 States=  3.5e+07 Transitions= 5.69e+08 Memory=  2896.943        t= 1.6e+03 R=   2e+04
+Depth=    9016 States=  3.6e+07 Transitions= 5.95e+08 Memory=  2945.186        t= 1.67e+03 R=   2e+04
+Depth=    9016 States=  3.7e+07 Transitions= 6.12e+08 Memory=  2995.576        t= 1.72e+03 R=   2e+04
+Depth=    9016 States=  3.8e+07 Transitions= 6.29e+08 Memory=  3042.256        t= 1.77e+03 R=   2e+04
+Depth=    9016 States=  3.9e+07 Transitions= 6.42e+08 Memory=  3093.135        t= 1.8e+03 R=   2e+04
+Depth=    9016 States=    4e+07 Transitions= 6.56e+08 Memory=  3146.651        t= 1.84e+03 R=   2e+04
+Depth=    9016 States=  4.1e+07 Transitions= 6.69e+08 Memory=  3202.901        t= 1.88e+03 R=   2e+04
+Depth=    9016 States=  4.2e+07 Transitions= 6.82e+08 Memory=  3254.951        t= 1.91e+03 R=   2e+04
+Depth=    9016 States=  4.3e+07 Transitions= 6.95e+08 Memory=  3309.053        t= 1.95e+03 R=   2e+04
+Depth=    9016 States=  4.4e+07 Transitions= 7.07e+08 Memory=  3360.615        t= 1.98e+03 R=   2e+04
+Depth=    9016 States=  4.5e+07 Transitions= 7.28e+08 Memory=  3410.713        t= 2.04e+03 R=   2e+04
+Depth=    9016 States=  4.6e+07 Transitions= 7.63e+08 Memory=  3466.963        t= 2.14e+03 R=   2e+04
+Depth=    9016 States=  4.7e+07 Transitions= 7.99e+08 Memory=  3532.295        t= 2.25e+03 R=   2e+04
+Depth=    9016 States=  4.8e+07 Transitions= 8.49e+08 Memory=  3594.307        t= 2.4e+03 R=   2e+04
+Depth=    9016 States=  4.9e+07 Transitions= 8.91e+08 Memory=  3646.455        t= 2.52e+03 R=   2e+04
+Depth=    9016 States=    5e+07 Transitions= 9.24e+08 Memory=  3689.912        t= 2.62e+03 R=   2e+04
+Depth=    9016 States=  5.1e+07 Transitions= 9.39e+08 Memory=  3742.940        t= 2.66e+03 R=   2e+04
+Depth=    9016 States=  5.2e+07 Transitions=  9.6e+08 Memory=  3793.721        t= 2.72e+03 R=   2e+04
+Depth=    9016 States=  5.3e+07 Transitions= 9.99e+08 Memory=  3843.721        t= 2.84e+03 R=   2e+04
+Depth=    9016 States=  5.4e+07 Transitions= 1.01e+09 Memory=  3899.287        t= 2.88e+03 R=   2e+04
+Depth=    9016 States=  5.5e+07 Transitions= 1.03e+09 Memory=  3947.627        t= 2.93e+03 R=   2e+04
+Depth=    9016 States=  5.6e+07 Transitions= 1.05e+09 Memory=  3995.479        t= 2.97e+03 R=   2e+04
+Depth=    9016 States=  5.7e+07 Transitions= 1.06e+09 Memory=  4049.776        t= 3.01e+03 R=   2e+04
+Depth=    9016 States=  5.8e+07 Transitions= 1.09e+09 Memory=  4100.947        t= 3.09e+03 R=   2e+04
+Depth=    9016 States=  5.9e+07 Transitions=  1.1e+09 Memory=  4151.533        t= 3.13e+03 R=   2e+04
+Depth=    9016 States=    6e+07 Transitions= 1.13e+09 Memory=  4201.924        t= 3.22e+03 R=   2e+04
+Depth=    9016 States=  6.1e+07 Transitions= 1.15e+09 Memory=  4251.240        t= 3.28e+03 R=   2e+04
+Depth=    9016 States=  6.2e+07 Transitions= 1.19e+09 Memory=  4303.779        t= 3.38e+03 R=   2e+04
+Depth=    9016 States=  6.3e+07 Transitions= 1.21e+09 Memory=  4352.998        t= 3.43e+03 R=   2e+04
+Depth=    9016 States=  6.4e+07 Transitions= 1.24e+09 Memory=  4403.096        t= 3.53e+03 R=   2e+04
+Depth=    9016 States=  6.5e+07 Transitions= 1.26e+09 Memory=  4453.682        t= 3.59e+03 R=   2e+04
+Depth=    9016 States=  6.6e+07 Transitions= 1.28e+09 Memory=  4503.096        t= 3.66e+03 R=   2e+04
+Depth=    9016 States=  6.7e+07 Transitions=  1.3e+09 Memory=  4554.072        t= 3.71e+03 R=   2e+04
+Depth=    9016 States=  6.8e+07 Transitions= 1.33e+09 Memory=  4604.951        t= 3.79e+03 R=   2e+04
+Depth=    9016 States=  6.9e+07 Transitions= 1.36e+09 Memory=  4656.221        t= 3.87e+03 R=   2e+04
+Depth=    9523 States=    7e+07 Transitions= 1.38e+09 Memory=  4712.959        t= 3.93e+03 R=   2e+04
+Depth=    9523 States=  7.1e+07 Transitions= 1.39e+09 Memory=  4759.443        t= 3.97e+03 R=   2e+04
+Depth=    9523 States=  7.2e+07 Transitions= 1.41e+09 Memory=  4814.033        t= 4.03e+03 R=   2e+04
+Depth=    9554 States=  7.3e+07 Transitions= 1.44e+09 Memory=  4868.037        t= 4.09e+03 R=   2e+04
+Depth=    9554 States=  7.4e+07 Transitions= 1.45e+09 Memory=  4915.401        t= 4.13e+03 R=   2e+04
+Depth=    9554 States=  7.5e+07 Transitions= 1.47e+09 Memory=  4970.479        t= 4.2e+03 R=   2e+04
+Depth=    9554 States=  7.6e+07 Transitions= 1.49e+09 Memory=  5022.334        t= 4.26e+03 R=   2e+04
+Depth=    9554 States=  7.7e+07 Transitions= 1.51e+09 Memory=  5071.358        t= 4.3e+03 R=   2e+04
+Depth=    9554 States=  7.8e+07 Transitions= 1.52e+09 Memory=  5124.971        t= 4.33e+03 R=   2e+04
+Depth=    9554 States=  7.9e+07 Transitions= 1.54e+09 Memory=  5178.779        t= 4.4e+03 R=   2e+04
+Depth=    9554 States=    8e+07 Transitions= 1.56e+09 Memory=  5231.221        t= 4.46e+03 R=   2e+04
+Depth=    9554 States=  8.1e+07 Transitions= 1.58e+09 Memory=  5284.541        t= 4.51e+03 R=   2e+04
+Depth=    9554 States=  8.2e+07 Transitions=  1.6e+09 Memory=  5335.029        t= 4.55e+03 R=   2e+04
+Depth=    9554 States=  8.3e+07 Transitions= 1.61e+09 Memory=  5385.908        t= 4.6e+03 R=   2e+04
+Depth=    9554 States=  8.4e+07 Transitions= 1.63e+09 Memory=  5437.276        t= 4.64e+03 R=   2e+04
+Depth=    9554 States=  8.5e+07 Transitions= 1.65e+09 Memory=  5486.787        t= 4.69e+03 R=   2e+04
+Depth=    9554 States=  8.6e+07 Transitions= 1.67e+09 Memory=  5535.615        t= 4.76e+03 R=   2e+04
+Depth=    9554 States=  8.7e+07 Transitions= 1.69e+09 Memory=  5589.522        t= 4.81e+03 R=   2e+04
+Depth=    9554 States=  8.8e+07 Transitions=  1.7e+09 Memory=  5647.627        t= 4.86e+03 R=   2e+04
+Depth=    9554 States=  8.9e+07 Transitions= 1.72e+09 Memory=  5695.967        t= 4.9e+03 R=   2e+04
+Depth=    9554 States=    9e+07 Transitions= 1.74e+09 Memory=  5749.971        t= 4.95e+03 R=   2e+04
+Depth=    9554 States=  9.1e+07 Transitions= 1.76e+09 Memory=  5799.483        t= 5.01e+03 R=   2e+04
+Depth=    9554 States=  9.2e+07 Transitions= 1.78e+09 Memory=  5849.092        t= 5.06e+03 R=   2e+04
+Depth=    9554 States=  9.3e+07 Transitions=  1.8e+09 Memory=  5897.529        t= 5.12e+03 R=   2e+04
+Depth=    9554 States=  9.4e+07 Transitions= 1.82e+09 Memory=  5949.287        t= 5.19e+03 R=   2e+04
+Depth=    9554 States=  9.5e+07 Transitions= 1.84e+09 Memory=  6004.072        t= 5.25e+03 R=   2e+04
+Depth=    9554 States=  9.6e+07 Transitions= 1.86e+09 Memory=  6052.412        t= 5.29e+03 R=   2e+04
+Depth=    9554 States=  9.7e+07 Transitions= 1.88e+09 Memory=  6107.588        t= 5.36e+03 R=   2e+04
+Depth=    9554 States=  9.8e+07 Transitions=  1.9e+09 Memory=  6160.029        t= 5.4e+03 R=   2e+04
+Depth=    9554 States=  9.9e+07 Transitions= 1.91e+09 Memory=  6211.006        t= 5.44e+03 R=   2e+04
+Depth=    9554 States=    1e+08 Transitions= 1.93e+09 Memory=  6262.861        t= 5.49e+03 R=   2e+04
+Depth=    9554 States= 1.01e+08 Transitions= 1.94e+09 Memory=  6317.256        t= 5.54e+03 R=   2e+04
+Depth=    9554 States= 1.02e+08 Transitions= 1.96e+09 Memory=  6371.455        t= 5.58e+03 R=   2e+04
+Depth=    9554 States= 1.03e+08 Transitions= 1.98e+09 Memory=  6423.311        t= 5.64e+03 R=   2e+04
+Depth=    9554 States= 1.04e+08 Transitions=    2e+09 Memory=  6477.608        t= 5.71e+03 R=   2e+04
+Depth=    9554 States= 1.05e+08 Transitions= 2.02e+09 Memory=  6526.045        t= 5.75e+03 R=   2e+04
+Depth=    9554 States= 1.06e+08 Transitions= 2.04e+09 Memory=  6580.733        t= 5.81e+03 R=   2e+04
+Depth=    9554 States= 1.07e+08 Transitions= 2.06e+09 Memory=  6634.053        t= 5.87e+03 R=   2e+04
+Depth=    9554 States= 1.08e+08 Transitions= 2.07e+09 Memory=  6685.127        t= 5.9e+03 R=   2e+04
+Depth=    9554 States= 1.09e+08 Transitions= 2.09e+09 Memory=  6736.299        t= 5.96e+03 R=   2e+04
+Depth=    9554 States=  1.1e+08 Transitions= 2.11e+09 Memory=  6790.401        t=  6e+03 R=   2e+04
+Depth=    9554 States= 1.11e+08 Transitions= 2.12e+09 Memory=  6844.990        t= 6.05e+03 R=   2e+04
+Depth=    9554 States= 1.12e+08 Transitions= 2.14e+09 Memory=  6890.010        t= 6.09e+03 R=   2e+04
+Depth=    9554 States= 1.13e+08 Transitions= 2.16e+09 Memory=  6940.889        t= 6.14e+03 R=   2e+04
+Depth=    9554 States= 1.14e+08 Transitions= 2.17e+09 Memory=  6992.061        t= 6.19e+03 R=   2e+04
+Depth=    9554 States= 1.15e+08 Transitions= 2.19e+09 Memory=  7043.623        t= 6.24e+03 R=   2e+04
+Depth=    9554 States= 1.16e+08 Transitions= 2.21e+09 Memory=  7095.186        t= 6.28e+03 R=   2e+04
+Depth=    9554 States= 1.17e+08 Transitions= 2.23e+09 Memory=  7144.600        t= 6.36e+03 R=   2e+04
+Depth=    9554 States= 1.18e+08 Transitions= 2.27e+09 Memory=  7198.897        t= 6.46e+03 R=   2e+04
+Depth=    9554 States= 1.19e+08 Transitions= 2.29e+09 Memory=  7253.779        t= 6.52e+03 R=   2e+04
+Depth=    9554 States=  1.2e+08 Transitions=  2.3e+09 Memory=  7307.783        t= 6.56e+03 R=   2e+04
+Depth=    9554 States= 1.21e+08 Transitions= 2.32e+09 Memory=  7358.467        t= 6.62e+03 R=   2e+04
+Depth=    9554 States= 1.22e+08 Transitions= 2.34e+09 Memory=  7410.811        t= 6.67e+03 R=   2e+04
+Depth=    9554 States= 1.23e+08 Transitions= 2.36e+09 Memory=  7465.010        t= 6.72e+03 R=   2e+04
+Depth=    9554 States= 1.24e+08 Transitions= 2.38e+09 Memory=  7516.670        t= 6.78e+03 R=   2e+04
+Depth=    9554 States= 1.25e+08 Transitions= 2.41e+09 Memory=  7565.205        t= 6.87e+03 R=   2e+04
+Depth=    9554 States= 1.26e+08 Transitions= 2.43e+09 Memory=  7613.936        t= 6.93e+03 R=   2e+04
+Depth=    9554 States= 1.27e+08 Transitions= 2.45e+09 Memory=  7662.471        t=  7e+03 R=   2e+04
+Depth=    9554 States= 1.28e+08 Transitions= 2.48e+09 Memory=  7710.713        t= 7.08e+03 R=   2e+04
+Depth=    9554 States= 1.29e+08 Transitions=  2.5e+09 Memory=  7759.346        t= 7.14e+03 R=   2e+04
+Depth=    9554 States=  1.3e+08 Transitions= 2.52e+09 Memory=  7810.322        t= 7.2e+03 R=   2e+04
+Depth=    9554 States= 1.31e+08 Transitions= 2.55e+09 Memory=  7859.834        t= 7.26e+03 R=   2e+04
+Depth=    9554 States= 1.32e+08 Transitions= 2.57e+09 Memory=  7910.615        t= 7.33e+03 R=   2e+04
+Depth=    9554 States= 1.33e+08 Transitions= 2.59e+09 Memory=  7959.248        t= 7.4e+03 R=   2e+04
+Depth=    9554 States= 1.34e+08 Transitions= 2.61e+09 Memory=  8011.787        t= 7.46e+03 R=   2e+04
+Depth=    9554 States= 1.35e+08 Transitions= 2.63e+09 Memory=  8060.713        t= 7.51e+03 R=   2e+04
+pan: resizing hashtable to -w28..  done
+Depth=    9554 States= 1.36e+08 Transitions= 2.65e+09 Memory= 10108.713        t= 7.58e+03 R=   2e+04
+Depth=    9554 States= 1.37e+08 Transitions= 2.66e+09 Memory= 10144.455        t= 7.62e+03 R=   2e+04
+Depth=    9554 States= 1.38e+08 Transitions= 2.67e+09 Memory= 10199.143        t= 7.65e+03 R=   2e+04
+Depth=    9554 States= 1.39e+08 Transitions= 2.69e+09 Memory= 10253.830        t= 7.69e+03 R=   2e+04
+Depth=    9554 States=  1.4e+08 Transitions=  2.7e+09 Memory= 10308.127        t= 7.74e+03 R=   2e+04
+Depth=    9554 States= 1.41e+08 Transitions= 2.72e+09 Memory= 10360.178        t= 7.78e+03 R=   2e+04
+Depth=    9554 States= 1.42e+08 Transitions= 2.74e+09 Memory= 10413.889        t= 7.83e+03 R=   2e+04
+Depth=    9554 States= 1.43e+08 Transitions= 2.75e+09 Memory= 10467.111        t= 7.87e+03 R=   2e+04
+Depth=    9554 States= 1.44e+08 Transitions= 2.77e+09 Memory= 10518.088        t= 7.92e+03 R=   2e+04
+Depth=    9554 States= 1.45e+08 Transitions= 2.79e+09 Memory= 10567.795        t= 7.98e+03 R=   2e+04
+Depth=    9554 States= 1.46e+08 Transitions= 2.82e+09 Memory= 10624.631        t= 8.08e+03 R=   2e+04
+Depth=    9554 States= 1.47e+08 Transitions= 2.86e+09 Memory= 10689.963        t= 8.19e+03 R=   2e+04
+Depth=    9554 States= 1.48e+08 Transitions= 2.91e+09 Memory= 10752.072        t= 8.34e+03 R=   2e+04
+Depth=    9554 States= 1.49e+08 Transitions= 2.95e+09 Memory= 10802.658        t= 8.46e+03 R=   2e+04
+Depth=    9554 States=  1.5e+08 Transitions= 2.99e+09 Memory= 10848.361        t= 8.56e+03 R=   2e+04
+Depth=    9554 States= 1.51e+08 Transitions=    3e+09 Memory= 10900.022        t= 8.61e+03 R=   2e+04
+Depth=    9554 States= 1.52e+08 Transitions= 3.02e+09 Memory= 10951.096        t= 8.66e+03 R=   2e+04
+Depth=    9554 States= 1.53e+08 Transitions= 3.05e+09 Memory= 11002.365        t= 8.75e+03 R=   2e+04
+Depth=    9554 States= 1.54e+08 Transitions= 3.08e+09 Memory= 11054.611        t= 8.83e+03 R=   2e+04
+Depth=    9554 States= 1.55e+08 Transitions=  3.1e+09 Memory= 11105.783        t= 8.88e+03 R=   2e+04
+Depth=    9554 States= 1.56e+08 Transitions= 3.11e+09 Memory= 11153.830        t= 8.92e+03 R=   2e+04
+Depth=    9554 States= 1.57e+08 Transitions= 3.13e+09 Memory= 11202.365        t= 8.96e+03 R=   2e+04
+Depth=    9554 States= 1.58e+08 Transitions= 3.14e+09 Memory= 11257.443        t=  9e+03 R=   2e+04
+Depth=    9554 States= 1.59e+08 Transitions= 3.17e+09 Memory= 11307.639        t= 9.08e+03 R=   2e+04
+Depth=    9554 States=  1.6e+08 Transitions= 3.19e+09 Memory= 11357.541        t= 9.15e+03 R=   2e+04
+Depth=    9554 States= 1.61e+08 Transitions= 3.21e+09 Memory= 11407.541        t= 9.21e+03 R=   2e+04
+Depth=    9554 States= 1.62e+08 Transitions= 3.24e+09 Memory= 11457.736        t= 9.28e+03 R=   2e+04
+Depth=    9554 States= 1.63e+08 Transitions= 3.27e+09 Memory= 11507.053        t= 9.36e+03 R=   2e+04
+Depth=    9554 States= 1.64e+08 Transitions= 3.29e+09 Memory= 11560.959        t= 9.44e+03 R=   2e+04
+Depth=    9554 States= 1.65e+08 Transitions= 3.31e+09 Memory= 11613.108        t= 9.5e+03 R=   2e+04
+Depth=    9554 States= 1.66e+08 Transitions= 3.34e+09 Memory= 11661.252        t= 9.58e+03 R=   2e+04
+Depth=    9554 States= 1.67e+08 Transitions= 3.37e+09 Memory= 11711.154        t= 9.66e+03 R=   2e+04
+Depth=    9554 States= 1.68e+08 Transitions= 3.39e+09 Memory= 11763.596        t= 9.72e+03 R=   2e+04
+Depth=    9554 States= 1.69e+08 Transitions= 3.41e+09 Memory= 11812.912        t= 9.78e+03 R=   2e+04
+Depth=    9554 States=  1.7e+08 Transitions= 3.44e+09 Memory= 11866.135        t= 9.87e+03 R=   2e+04
+Depth=    9554 States= 1.71e+08 Transitions= 3.46e+09 Memory= 11920.139        t= 9.91e+03 R=   2e+04
+Depth=    9554 States= 1.72e+08 Transitions= 3.47e+09 Memory= 11967.014        t= 9.95e+03 R=   2e+04
+Depth=    9554 States= 1.73e+08 Transitions=  3.5e+09 Memory= 12021.115        t=  1e+04 R=   2e+04
+Depth=    9554 States= 1.74e+08 Transitions= 3.52e+09 Memory= 12075.315        t= 1.01e+04 R=   2e+04
+Depth=    9554 States= 1.75e+08 Transitions= 3.53e+09 Memory= 12126.779        t= 1.01e+04 R=   2e+04
+Depth=    9554 States= 1.76e+08 Transitions= 3.56e+09 Memory= 12180.002        t= 1.02e+04 R=   2e+04
+Depth=    9554 States= 1.77e+08 Transitions= 3.57e+09 Memory= 12229.416        t= 1.02e+04 R=   2e+04
+Depth=    9554 States= 1.78e+08 Transitions= 3.59e+09 Memory= 12282.834        t= 1.03e+04 R=   2e+04
+Depth=    9554 States= 1.79e+08 Transitions= 3.61e+09 Memory= 12334.494        t= 1.03e+04 R=   2e+04
+Depth=    9554 States=  1.8e+08 Transitions= 3.63e+09 Memory= 12387.522        t= 1.04e+04 R=   2e+04
+Depth=    9554 States= 1.81e+08 Transitions= 3.65e+09 Memory= 12440.451        t= 1.05e+04 R=   2e+04
+Depth=    9554 States= 1.82e+08 Transitions= 3.66e+09 Memory= 12492.111        t= 1.05e+04 R=   2e+04
+Depth=    9554 States= 1.83e+08 Transitions= 3.68e+09 Memory= 12544.162        t= 1.05e+04 R=   2e+04
+Depth=    9554 States= 1.84e+08 Transitions= 3.69e+09 Memory= 12594.943        t= 1.06e+04 R=   2e+04
+Depth=    9554 States= 1.85e+08 Transitions= 3.71e+09 Memory= 12645.627        t= 1.06e+04 R=   2e+04
+Depth=    9554 States= 1.86e+08 Transitions= 3.73e+09 Memory= 12695.236        t= 1.07e+04 R=   2e+04
+Depth=    9554 States= 1.87e+08 Transitions= 3.75e+09 Memory= 12750.705        t= 1.07e+04 R=   2e+04
+Depth=    9554 States= 1.88e+08 Transitions= 3.77e+09 Memory= 12807.639        t= 1.08e+04 R=   2e+04
+Depth=    9554 States= 1.89e+08 Transitions= 3.78e+09 Memory= 12855.979        t= 1.08e+04 R=   2e+04
+Depth=    9554 States=  1.9e+08 Transitions=  3.8e+09 Memory= 12909.592        t= 1.09e+04 R=   2e+04
+Depth=    9554 States= 1.91e+08 Transitions= 3.82e+09 Memory= 12959.104        t= 1.09e+04 R=   2e+04
+Depth=    9554 States= 1.92e+08 Transitions= 3.84e+09 Memory= 13008.615        t= 1.1e+04 R=   2e+04
+Depth=    9554 States= 1.93e+08 Transitions= 3.86e+09 Memory= 13065.061        t= 1.11e+04 R=   2e+04
+Depth=    9554 States= 1.94e+08 Transitions= 3.88e+09 Memory= 13114.377        t= 1.11e+04 R=   2e+04
+Depth=    9554 States= 1.95e+08 Transitions=  3.9e+09 Memory= 13168.674        t= 1.12e+04 R=   2e+04
+Depth=    9554 States= 1.96e+08 Transitions= 3.92e+09 Memory= 13222.678        t= 1.12e+04 R=   2e+04
+Depth=    9554 States= 1.97e+08 Transitions= 3.93e+09 Memory= 13271.701        t= 1.13e+04 R=   2e+04
+Depth=    9554 States= 1.98e+08 Transitions= 3.95e+09 Memory= 13324.338        t= 1.13e+04 R=   2e+04
+Depth=    9554 States= 1.99e+08 Transitions= 3.96e+09 Memory= 13377.170        t= 1.13e+04 R=   2e+04
+Depth=    9554 States=    2e+08 Transitions= 3.98e+09 Memory= 13432.639        t= 1.14e+04 R=   2e+04
+Depth=    9554 States= 2.01e+08 Transitions=    4e+09 Memory= 13483.811        t= 1.14e+04 R=   2e+04
+Depth=    9554 States= 2.02e+08 Transitions= 4.03e+09 Memory= 13537.522        t= 1.15e+04 R=   2e+04
+Depth=    9554 States= 2.03e+08 Transitions= 4.04e+09 Memory= 13588.693        t= 1.16e+04 R=   2e+04
+Depth=    9554 States= 2.04e+08 Transitions= 4.06e+09 Memory= 13642.600        t= 1.16e+04 R=   2e+04
+Depth=    9554 States= 2.05e+08 Transitions= 4.08e+09 Memory= 13696.799        t= 1.17e+04 R=   2e+04
+Depth=    9554 States= 2.06e+08 Transitions= 4.09e+09 Memory= 13744.748        t= 1.17e+04 R=   2e+04
+Depth=    9554 States= 2.07e+08 Transitions= 4.11e+09 Memory= 13798.068        t= 1.18e+04 R=   2e+04
+Depth=    9554 States= 2.08e+08 Transitions= 4.13e+09 Memory= 13850.022        t= 1.18e+04 R=   2e+04
+Depth=    9554 States= 2.09e+08 Transitions= 4.14e+09 Memory= 13905.783        t= 1.18e+04 R=   2e+04
+Depth=    9554 States=  2.1e+08 Transitions= 4.16e+09 Memory= 13952.365        t= 1.19e+04 R=   2e+04
+Depth=    9554 States= 2.11e+08 Transitions= 4.18e+09 Memory= 14003.244        t= 1.19e+04 R=   2e+04
+Depth=    9554 States= 2.12e+08 Transitions= 4.19e+09 Memory= 14054.318        t= 1.2e+04 R=   2e+04
+Depth=    9554 States= 2.13e+08 Transitions= 4.21e+09 Memory= 14106.076        t= 1.2e+04 R=   2e+04
+Depth=    9554 States= 2.14e+08 Transitions= 4.22e+09 Memory= 14156.662        t= 1.21e+04 R=   2e+04
+Depth=    9554 States= 2.15e+08 Transitions= 4.26e+09 Memory= 14209.690        t= 1.22e+04 R=   2e+04
+Depth=    9554 States= 2.16e+08 Transitions= 4.29e+09 Memory= 14265.256        t= 1.23e+04 R=   2e+04
+Depth=    9554 States= 2.17e+08 Transitions=  4.3e+09 Memory= 14317.697        t= 1.23e+04 R=   2e+04
+Depth=    9554 States= 2.18e+08 Transitions= 4.32e+09 Memory= 14371.311        t= 1.23e+04 R=   2e+04
+Depth=    9554 States= 2.19e+08 Transitions= 4.33e+09 Memory= 14424.045        t= 1.24e+04 R=   2e+04
+Depth=    9554 States=  2.2e+08 Transitions= 4.35e+09 Memory= 14477.854        t= 1.24e+04 R=   2e+04
+Depth=    9554 States= 2.21e+08 Transitions= 4.37e+09 Memory= 14529.611        t= 1.25e+04 R=   2e+04
+Depth=    9554 States= 2.22e+08 Transitions=  4.4e+09 Memory= 14576.291        t= 1.26e+04 R=   2e+04
+Depth=    9554 States= 2.23e+08 Transitions= 4.43e+09 Memory= 14625.608        t= 1.27e+04 R=   2e+04
+Depth=    9554 States= 2.24e+08 Transitions= 4.45e+09 Memory= 14674.045        t= 1.27e+04 R=   2e+04
+Depth=    9554 States= 2.25e+08 Transitions= 4.48e+09 Memory= 14722.776        t= 1.28e+04 R=   2e+04
+Depth=    9554 States= 2.26e+08 Transitions=  4.5e+09 Memory= 14770.529        t= 1.29e+04 R=   2e+04
+Depth=    9554 States= 2.27e+08 Transitions= 4.52e+09 Memory= 14821.018        t= 1.29e+04 R=   2e+04
+Depth=    9554 States= 2.28e+08 Transitions= 4.54e+09 Memory= 14870.529        t= 1.3e+04 R=   2e+04
+Depth=    9554 States= 2.29e+08 Transitions= 4.56e+09 Memory= 14918.967        t= 1.3e+04 R=   2e+04
+Depth=    9554 States=  2.3e+08 Transitions= 4.59e+09 Memory= 14966.916        t= 1.31e+04 R=   2e+04
+Depth=    9554 States= 2.31e+08 Transitions=  4.6e+09 Memory= 15017.600        t= 1.32e+04 R=   2e+04
+Depth=    9554 States= 2.32e+08 Transitions= 4.63e+09 Memory= 15066.330        t= 1.32e+04 R=   2e+04
diff --git a/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-intel-ipi/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/.input.spin b/formal-model/urcu-controldataflow-intel-no-ipi/.input.spin
new file mode 100644 (file)
index 0000000..3191ba1
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
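+/*
+ * Illustrative sketch only (hypothetical tokens TOKEN_A and TOKEN_B, not part
+ * of the model): an instruction B that consumes the result of instruction A
+ * is typically guarded as follows inside the per-process do ... od loop:
+ *
+ *     :: CONSUME_TOKENS(proc_state, TOKEN_A, TOKEN_B) ->
+ *             (execute instruction B)
+ *             PRODUCE_TOKENS(proc_state, TOKEN_B);
+ *
+ * The guard is enabled only once TOKEN_A has been produced (the dependency)
+ * and as long as TOKEN_B has not yet been produced, so each instruction
+ * executes at most once before the tokens are cleared.
+ */
+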
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
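+/*
+ * A minimal example of the three data dependency kinds, using hypothetical
+ * variables a, b and c (for illustration only):
+ *
+ *     a = b + 1;      (1)
+ *     c = a * 2;      (2) RAW on 'a' with (1): (2) must see (1)'s result
+ *     b = 5;          (3) WAR on 'b' with (1): (3) must not overtake (1)'s read
+ *     b = 7;          (4) WAW on 'b' with (3): the final value of 'b' must be 7
+ */
+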
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate a dirty cache line to memory (and thus,
+ * eventually, to other caches). The choice is nondeterministic.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
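+/*
+ * Usage sketch for the cached-variable macros, with a hypothetical variable
+ * "foo" (illustration only; the model's real variables are declared below):
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);          declares mem_foo, cached_foo,
+ *                                             cache_dirty_foo
+ *     INIT_CACHED_VAR(foo, 0, j);             in init, with scratch counter j
+ *     WRITE_CACHED_VAR(foo, 1);               lands in this process' cache
+ *                                             line and marks it dirty
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     flushes the dirty line to
+ *                                             mem_foo
+ */
+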
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * A signal-based memory barrier only executes at points where
+                * the instructions executed so far appear in program order:
+                * the alternatives below allow the barrier after each
+                * program-order prefix of the reader's instructions.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : the nested memory barrier is removed from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier unconditionally because the cost of adding a branch to skip
+                        * it in the common (non-nested) case would not be justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures
+        * execution does not spill across loop iterations.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution into the next.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. Reader and writer progress have to be tested
+                * separately, otherwise we could believe the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we do not add non-existent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok not
+                * to check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, under weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so that the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/DEFINES b/formal-model/urcu-controldataflow-intel-no-ipi/DEFINES
new file mode 100644 (file)
index 0000000..b4d92d7
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
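+// Illustrative note: read_poison is the safety predicate presumably
+// referenced by the urcu_free LTL claim; the intent is that no reader ever
+// observes poisoned (i.e. freed) data, so read_poison should never hold.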
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
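+// Example decomposition (illustrative): with RCU_GP_CTR_BIT = 1 << 7, an
+// urcu_active_readers value of 0x81 encodes a nesting count of 1
+// (0x81 & RCU_GP_CTR_NEST_MASK) with the grace-period parity bit set
+// (0x81 & RCU_GP_CTR_BIT).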
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/Makefile b/formal-model/urcu-controldataflow-intel-no-ipi/Makefile
new file mode 100644 (file)
index 0000000..de47dff
--- /dev/null
@@ -0,0 +1,170 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+# For multi-core verification: 15.5GB of shared memory, use files if full.
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       make urcu_free | tee urcu_free.log
+       make urcu_free_no_mb | tee urcu_free_no_mb.log
+       make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       make asserts | tee asserts.log
+       make summary
+
+# To show a trail after each individual make target:
+#   spin -v -t -N pan.ltl input.spin
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
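+# A successful run shows "errors: 0" in every log (this is what the grep above
+# surfaces). A non-zero error count means pan found a counterexample and left
+# a .trail file next to the input, which can be replayed with the spin trail
+# command noted above.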
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/asserts.log b/formal-model/urcu-controldataflow-intel-no-ipi/asserts.log
new file mode 100644 (file)
index 0000000..fe09a53
--- /dev/null
@@ -0,0 +1,429 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+cat DEFINES > .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w20
+Depth=    4473 States=    1e+06 Transitions= 2.37e+07 Memory=   542.619        t=   57.3 R=   2e+04
+Depth=    4540 States=    2e+06 Transitions=  4.8e+07 Memory=   618.889        t=    117 R=   2e+04
+Depth=    4540 States=    3e+06 Transitions= 7.25e+07 Memory=   695.158        t=    178 R=   2e+04
+pan: resizing hashtable to -w22..  done
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             - (none specified)
+       assertion violations    +
+       cycle checks            - (disabled by -DSAFETY)
+       invalid end states      +
+
+State-vector 72 byte, depth reached 4540, errors: 0
+  3841511 states, stored
+ 90242688 states, matched
+ 94084199 transitions (= stored+matched)
+1.5073578e+09 atomic steps
+hash conflicts:  63759942 (resolved)
+
+Stats on memory usage (in Megabytes):
+  366.355      equivalent memory usage for states (stored*(State-vector + overhead))
+  300.680      actual memory usage for states (compression: 82.07%)
+               state-vector as stored = 54 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  790.440      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 410, ".input.spin", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 81, "(1)"
+       line 256, ".input.spin", state 101, "(1)"
+       line 260, ".input.spin", state 109, "(1)"
+       line 596, ".input.spin", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 410, ".input.spin", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 199, "(1)"
+       line 256, ".input.spin", state 219, "(1)"
+       line 260, ".input.spin", state 227, "(1)"
+       line 410, ".input.spin", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 310, "(1)"
+       line 256, ".input.spin", state 330, "(1)"
+       line 260, ".input.spin", state 338, "(1)"
+       line 410, ".input.spin", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 361, "(1)"
+       line 410, ".input.spin", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, ".input.spin", state 362, "else"
+       line 410, ".input.spin", state 365, "(1)"
+       line 414, ".input.spin", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 375, "(1)"
+       line 414, ".input.spin", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, ".input.spin", state 376, "else"
+       line 414, ".input.spin", state 379, "(1)"
+       line 414, ".input.spin", state 380, "(1)"
+       line 414, ".input.spin", state 380, "(1)"
+       line 412, ".input.spin", state 385, "((i<1))"
+       line 412, ".input.spin", state 385, "((i>=1))"
+       line 419, ".input.spin", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 393, "(1)"
+       line 419, ".input.spin", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, ".input.spin", state 394, "else"
+       line 419, ".input.spin", state 397, "(1)"
+       line 419, ".input.spin", state 398, "(1)"
+       line 419, ".input.spin", state 398, "(1)"
+       line 423, ".input.spin", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 407, "(1)"
+       line 423, ".input.spin", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, ".input.spin", state 408, "else"
+       line 423, ".input.spin", state 411, "(1)"
+       line 423, ".input.spin", state 412, "(1)"
+       line 423, ".input.spin", state 412, "(1)"
+       line 421, ".input.spin", state 417, "((i<2))"
+       line 421, ".input.spin", state 417, "((i>=2))"
+       line 248, ".input.spin", state 423, "(1)"
+       line 252, ".input.spin", state 431, "(1)"
+       line 252, ".input.spin", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, ".input.spin", state 432, "else"
+       line 250, ".input.spin", state 437, "((i<1))"
+       line 250, ".input.spin", state 437, "((i>=1))"
+       line 256, ".input.spin", state 443, "(1)"
+       line 256, ".input.spin", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, ".input.spin", state 444, "else"
+       line 260, ".input.spin", state 451, "(1)"
+       line 260, ".input.spin", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, ".input.spin", state 452, "else"
+       line 258, ".input.spin", state 457, "((i<2))"
+       line 258, ".input.spin", state 457, "((i>=2))"
+       line 265, ".input.spin", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, ".input.spin", state 461, "else"
+       line 430, ".input.spin", state 463, "(1)"
+       line 430, ".input.spin", state 463, "(1)"
+       line 596, ".input.spin", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 596, ".input.spin", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 596, ".input.spin", state 468, "(1)"
+       line 271, ".input.spin", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 483, "(1)"
+       line 279, ".input.spin", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 519, "(1)"
+       line 252, ".input.spin", state 527, "(1)"
+       line 256, ".input.spin", state 539, "(1)"
+       line 260, ".input.spin", state 547, "(1)"
+       line 410, ".input.spin", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 629, "(1)"
+       line 252, ".input.spin", state 637, "(1)"
+       line 256, ".input.spin", state 649, "(1)"
+       line 260, ".input.spin", state 657, "(1)"
+       line 410, ".input.spin", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 747, "(1)"
+       line 256, ".input.spin", state 767, "(1)"
+       line 260, ".input.spin", state 775, "(1)"
+       line 410, ".input.spin", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 796, "(1)"
+       line 410, ".input.spin", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, ".input.spin", state 797, "else"
+       line 410, ".input.spin", state 800, "(1)"
+       line 414, ".input.spin", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 810, "(1)"
+       line 414, ".input.spin", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, ".input.spin", state 811, "else"
+       line 414, ".input.spin", state 814, "(1)"
+       line 414, ".input.spin", state 815, "(1)"
+       line 414, ".input.spin", state 815, "(1)"
+       line 412, ".input.spin", state 820, "((i<1))"
+       line 412, ".input.spin", state 820, "((i>=1))"
+       line 419, ".input.spin", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 828, "(1)"
+       line 419, ".input.spin", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, ".input.spin", state 829, "else"
+       line 419, ".input.spin", state 832, "(1)"
+       line 419, ".input.spin", state 833, "(1)"
+       line 419, ".input.spin", state 833, "(1)"
+       line 423, ".input.spin", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 842, "(1)"
+       line 423, ".input.spin", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, ".input.spin", state 843, "else"
+       line 423, ".input.spin", state 846, "(1)"
+       line 423, ".input.spin", state 847, "(1)"
+       line 423, ".input.spin", state 847, "(1)"
+       line 421, ".input.spin", state 852, "((i<2))"
+       line 421, ".input.spin", state 852, "((i>=2))"
+       line 248, ".input.spin", state 858, "(1)"
+       line 252, ".input.spin", state 866, "(1)"
+       line 252, ".input.spin", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, ".input.spin", state 867, "else"
+       line 250, ".input.spin", state 872, "((i<1))"
+       line 250, ".input.spin", state 872, "((i>=1))"
+       line 256, ".input.spin", state 878, "(1)"
+       line 256, ".input.spin", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, ".input.spin", state 879, "else"
+       line 260, ".input.spin", state 886, "(1)"
+       line 260, ".input.spin", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, ".input.spin", state 887, "else"
+       line 258, ".input.spin", state 892, "((i<2))"
+       line 258, ".input.spin", state 892, "((i>=2))"
+       line 265, ".input.spin", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, ".input.spin", state 896, "else"
+       line 430, ".input.spin", state 898, "(1)"
+       line 430, ".input.spin", state 898, "(1)"
+       line 604, ".input.spin", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 410, ".input.spin", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 971, "(1)"
+       line 252, ".input.spin", state 979, "(1)"
+       line 256, ".input.spin", state 991, "(1)"
+       line 260, ".input.spin", state 999, "(1)"
+       line 410, ".input.spin", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1085, "(1)"
+       line 256, ".input.spin", state 1105, "(1)"
+       line 260, ".input.spin", state 1113, "(1)"
+       line 410, ".input.spin", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1200, "(1)"
+       line 256, ".input.spin", state 1220, "(1)"
+       line 260, ".input.spin", state 1228, "(1)"
+       line 410, ".input.spin", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1311, "(1)"
+       line 256, ".input.spin", state 1331, "(1)"
+       line 260, ".input.spin", state 1339, "(1)"
+       line 271, ".input.spin", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1407, "(1)"
+       line 252, ".input.spin", state 1415, "(1)"
+       line 256, ".input.spin", state 1427, "(1)"
+       line 260, ".input.spin", state 1435, "(1)"
+       line 410, ".input.spin", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1517, "(1)"
+       line 252, ".input.spin", state 1525, "(1)"
+       line 256, ".input.spin", state 1537, "(1)"
+       line 260, ".input.spin", state 1545, "(1)"
+       line 410, ".input.spin", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1628, "(1)"
+       line 252, ".input.spin", state 1636, "(1)"
+       line 256, ".input.spin", state 1648, "(1)"
+       line 260, ".input.spin", state 1656, "(1)"
+       line 410, ".input.spin", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1742, "(1)"
+       line 256, ".input.spin", state 1762, "(1)"
+       line 260, ".input.spin", state 1770, "(1)"
+       line 643, ".input.spin", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 410, ".input.spin", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1860, "(1)"
+       line 256, ".input.spin", state 1880, "(1)"
+       line 260, ".input.spin", state 1888, "(1)"
+       line 410, ".input.spin", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1971, "(1)"
+       line 256, ".input.spin", state 1991, "(1)"
+       line 260, ".input.spin", state 1999, "(1)"
+       line 410, ".input.spin", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 2022, "(1)"
+       line 410, ".input.spin", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, ".input.spin", state 2023, "else"
+       line 410, ".input.spin", state 2026, "(1)"
+       line 414, ".input.spin", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2036, "(1)"
+       line 414, ".input.spin", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, ".input.spin", state 2037, "else"
+       line 414, ".input.spin", state 2040, "(1)"
+       line 414, ".input.spin", state 2041, "(1)"
+       line 414, ".input.spin", state 2041, "(1)"
+       line 412, ".input.spin", state 2046, "((i<1))"
+       line 412, ".input.spin", state 2046, "((i>=1))"
+       line 419, ".input.spin", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2054, "(1)"
+       line 419, ".input.spin", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, ".input.spin", state 2055, "else"
+       line 419, ".input.spin", state 2058, "(1)"
+       line 419, ".input.spin", state 2059, "(1)"
+       line 419, ".input.spin", state 2059, "(1)"
+       line 423, ".input.spin", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2068, "(1)"
+       line 423, ".input.spin", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, ".input.spin", state 2069, "else"
+       line 423, ".input.spin", state 2072, "(1)"
+       line 423, ".input.spin", state 2073, "(1)"
+       line 423, ".input.spin", state 2073, "(1)"
+       line 421, ".input.spin", state 2078, "((i<2))"
+       line 421, ".input.spin", state 2078, "((i>=2))"
+       line 248, ".input.spin", state 2084, "(1)"
+       line 252, ".input.spin", state 2092, "(1)"
+       line 252, ".input.spin", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, ".input.spin", state 2093, "else"
+       line 250, ".input.spin", state 2098, "((i<1))"
+       line 250, ".input.spin", state 2098, "((i>=1))"
+       line 256, ".input.spin", state 2104, "(1)"
+       line 256, ".input.spin", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, ".input.spin", state 2105, "else"
+       line 260, ".input.spin", state 2112, "(1)"
+       line 260, ".input.spin", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, ".input.spin", state 2113, "else"
+       line 258, ".input.spin", state 2118, "((i<2))"
+       line 258, ".input.spin", state 2118, "((i>=2))"
+       line 265, ".input.spin", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, ".input.spin", state 2122, "else"
+       line 430, ".input.spin", state 2124, "(1)"
+       line 430, ".input.spin", state 2124, "(1)"
+       line 643, ".input.spin", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 643, ".input.spin", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 643, ".input.spin", state 2129, "(1)"
+       line 271, ".input.spin", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2180, "(1)"
+       line 252, ".input.spin", state 2188, "(1)"
+       line 256, ".input.spin", state 2200, "(1)"
+       line 260, ".input.spin", state 2208, "(1)"
+       line 410, ".input.spin", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2290, "(1)"
+       line 252, ".input.spin", state 2298, "(1)"
+       line 256, ".input.spin", state 2310, "(1)"
+       line 260, ".input.spin", state 2318, "(1)"
+       line 271, ".input.spin", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2387, "(1)"
+       line 252, ".input.spin", state 2395, "(1)"
+       line 256, ".input.spin", state 2407, "(1)"
+       line 260, ".input.spin", state 2415, "(1)"
+       line 410, ".input.spin", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2497, "(1)"
+       line 252, ".input.spin", state 2505, "(1)"
+       line 256, ".input.spin", state 2517, "(1)"
+       line 260, ".input.spin", state 2525, "(1)"
+       line 410, ".input.spin", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 2608, "(1)"
+       line 252, ".input.spin", state 2616, "(1)"
+       line 256, ".input.spin", state 2628, "(1)"
+       line 260, ".input.spin", state 2636, "(1)"
+       line 248, ".input.spin", state 2667, "(1)"
+       line 256, ".input.spin", state 2687, "(1)"
+       line 260, ".input.spin", state 2695, "(1)"
+       line 248, ".input.spin", state 2710, "(1)"
+       line 252, ".input.spin", state 2718, "(1)"
+       line 256, ".input.spin", state 2730, "(1)"
+       line 260, ".input.spin", state 2738, "(1)"
+       line 897, ".input.spin", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 410, ".input.spin", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 82, "(1)"
+       line 252, ".input.spin", state 90, "(1)"
+       line 256, ".input.spin", state 102, "(1)"
+       line 271, ".input.spin", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, ".input.spin", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, ".input.spin", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 257, "(1)"
+       line 252, ".input.spin", state 265, "(1)"
+       line 256, ".input.spin", state 277, "(1)"
+       line 260, ".input.spin", state 285, "(1)"
+       line 414, ".input.spin", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, ".input.spin", state 378, "(1)"
+       line 256, ".input.spin", state 390, "(1)"
+       line 260, ".input.spin", state 398, "(1)"
+       line 414, ".input.spin", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, ".input.spin", state 499, "(1)"
+       line 256, ".input.spin", state 511, "(1)"
+       line 260, ".input.spin", state 519, "(1)"
+       line 414, ".input.spin", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, ".input.spin", state 610, "(1)"
+       line 256, ".input.spin", state 622, "(1)"
+       line 260, ".input.spin", state 630, "(1)"
+       line 414, ".input.spin", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, ".input.spin", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, ".input.spin", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, ".input.spin", state 723, "(1)"
+       line 256, ".input.spin", state 735, "(1)"
+       line 260, ".input.spin", state 743, "(1)"
+       line 271, ".input.spin", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, ".input.spin", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 820, "(1)"
+       line 283, ".input.spin", state 827, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 843, "(1)"
+       line 252, ".input.spin", state 851, "(1)"
+       line 256, ".input.spin", state 863, "(1)"
+       line 260, ".input.spin", state 871, "(1)"
+       line 275, ".input.spin", state 896, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 909, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 918, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 934, "(1)"
+       line 252, ".input.spin", state 942, "(1)"
+       line 256, ".input.spin", state 954, "(1)"
+       line 260, ".input.spin", state 962, "(1)"
+       line 275, ".input.spin", state 987, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1000, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1009, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1025, "(1)"
+       line 252, ".input.spin", state 1033, "(1)"
+       line 256, ".input.spin", state 1045, "(1)"
+       line 260, ".input.spin", state 1053, "(1)"
+       line 275, ".input.spin", state 1078, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, ".input.spin", state 1091, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, ".input.spin", state 1100, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, ".input.spin", state 1116, "(1)"
+       line 252, ".input.spin", state 1124, "(1)"
+       line 256, ".input.spin", state 1136, "(1)"
+       line 260, ".input.spin", state 1144, "(1)"
+       line 1236, ".input.spin", state 1159, "-end-"
+       (71 of 1159 states)
+unreached in proctype :init:
+       (0 of 78 states)
+
+pan: elapsed time 231 seconds
+pan: rate 16628.478 states/second
+pan: avg transition delay 2.4555e-06 usec
+cp .input.spin asserts.spin.input
+cp .input.spin.trail asserts.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/asserts.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/asserts.spin.input
new file mode 100644 (file)
index 0000000..3191ba1
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable
+ * to save state space. The bits act as triggers to execute the instructions
+ * that take those variables as input. Leaving bits active inhibits
+ * instruction execution. This scheme makes both instruction disabling and
+ * dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
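+/*
+ * Worked example (sketch) of the token macros above, using two hypothetical
+ * one-hot tokens STEP_A and STEP_B (not part of the model):
+ *
+ *     #define STEP_A (1 << 0)
+ *     #define STEP_B (1 << 1)
+ *
+ *     PRODUCE_TOKENS(state, STEP_A);              // state == STEP_A
+ *     if
+ *     :: CONSUME_TOKENS(state, STEP_A, STEP_B) -> // A produced, B not yet
+ *             PRODUCE_TOKENS(state, STEP_B);      // state == STEP_A | STEP_B
+ *     fi;
+ *     CLEAR_TOKENS(state, STEP_A | STEP_B);       // state == 0
+ *
+ * CONSUME_TOKENS(state, bits, notbits) is a pure guard: it is true only when
+ * every bit in "bits" has been produced and no bit in "notbits" has.
+ */
+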
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it remains when multiple writes must
+ * target the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, the loops must be unrolled manually. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not update memory (and hence other caches' view) when the cache
+ * line is dirty.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
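+/*
+ * Life-cycle sketch for a cached variable, using a hypothetical variable
+ * "foo" declared with DECLARE_CACHED_VAR(byte, foo) (not part of the model):
+ *
+ *     WRITE_CACHED_VAR(foo, 1);                  // update local cache, set dirty bit
+ *     RANDOM_CACHE_WRITE_TO_MEM(foo, get_pid()); // may or may not flush to mem_foo
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());        // flush now if still dirty (wmb path)
+ *     CACHE_READ_FROM_MEM(foo, get_pid());       // refresh from mem_foo only if clean
+ *
+ * A dirty line is never overwritten by a read from memory, so a process
+ * always sees its own most recent write.
+ */
+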
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
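+
+/*
+ * Handshake sketch (REMOTE_BARRIERS mode): smp_mb_send() raises
+ * reader_barrier[i] and busy-waits until the reader clears it, while
+ * smp_mb_recv() either services the request (smp_mb() then clear the flag)
+ * or breaks out without servicing it. This models read-side barriers that
+ * are promoted remotely (e.g. by signals) rather than executed as explicit
+ * read-side instructions.
+ */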
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
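+/*
+ * ooo_mem() models the weak-ordering "noise" between instructions: it
+ * nondeterministically flushes dirty cache lines to memory and, when
+ * HAVE_OOO_CACHE_READ is defined (Alpha), nondeterministically refreshes
+ * clean lines from memory; otherwise it issues smp_rmb(), reflecting that
+ * dependent reads are not reordered on the other architectures.
+ */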
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
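+/*
+ * Token-flow sketch for PROCEDURE_READ_LOCK (with base = READ_LOCK_BASE for
+ * the outermost lock): it models, roughly,
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;  (IF TRUE branch)
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;      (ELSE branch)
+ *
+ * READ_PROD_A_READ << base is produced by the initial read,
+ * READ_PROD_B_IF_TRUE / READ_PROD_B_IF_FALSE by the branch decision, and
+ * "producetoken" (e.g. READ_LOCK_OUT) marks the post-dominating merge point
+ * of both branches.
+ */
+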
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
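+/*
+ * Summary of the reader token word layout defined above (one-hot bits,
+ * 30 bits used in total):
+ *
+ *     bit  0     READ_PROD_NONE
+ *     bits 1-5   first read lock            (out = bit 5)
+ *     bit  6     first mb
+ *     bits 7-11  nested read lock           (out = bit 11)
+ *     bit  12    pointer read (READ_GEN)
+ *     bit  13    data access (ACCESS_GEN)
+ *     bits 14-15 nested read unlock         (out = bit 15)
+ *     bit  16    second mb
+ *     bits 17-18 read unlock                (out = bit 18)
+ *     bits 19-23 unrolled read lock         (out = bit 23)
+ *     bit  24    third mb
+ *     bit  25    unrolled pointer read
+ *     bit  26    unrolled data access
+ *     bit  27    fourth mb
+ *     bits 28-29 unrolled read unlock       (out = bit 29)
+ */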
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop iteration from spilling its execution into the other.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so we
+                                * don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks that the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with progress here so that, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/references.txt b/formal-model/urcu-controldataflow-intel-no-ipi/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu.sh b/formal-model/urcu-controldataflow-intel-no-ipi/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+spin -a urcu.spin
+cc -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w21
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu.spin b/formal-model/urcu-controldataflow-intel-no-ipi/urcu.spin
new file mode 100644 (file)
index 0000000..54752a1
--- /dev/null
@@ -0,0 +1,1254 @@
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits act as triggers to execute the instructions having
+ * those variables as input. Leaving bits active inhibits instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
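+
+/*
+ * Illustration (editor's sketch, not part of the verified model): the token
+ * macros above express dependencies between instructions. A hypothetical flow
+ * with two steps, where step B may only fire once step A has produced its
+ * token, could be written as:
+ *
+ *     int flow = 0;
+ *     if
+ *     :: CONSUME_TOKENS(flow, 0, (1 << 0)) ->          (A: no input needed)
+ *             PRODUCE_TOKENS(flow, (1 << 0));
+ *     :: CONSUME_TOKENS(flow, (1 << 0), (1 << 1)) ->   (B: needs A's token)
+ *             PRODUCE_TOKENS(flow, (1 << 1));
+ *     fi;
+ *
+ * The "notbits" argument carries the instruction's own output token, so each
+ * instruction is inhibited once it has already executed.
+ */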
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can remove this dependency, but it can still be required when writing
+ * multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
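+
+/*
+ * Illustration (editor's addition, with hypothetical variables x and y) of the
+ * data dependency types listed above:
+ *
+ *     RAW:  x = 1;  y = x;    (the second statement reads the x just written)
+ *     WAR:  y = x;  x = 1;    (x is rewritten after having been read)
+ *     WAW:  x = 1;  x = 2;    (two writes to x must keep their order)
+ */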
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between iterations. To see the
+ * effect of loop unrolling, loops must be unrolled manually. Note that if
+ * loops end or start with a core-synchronizing instruction, the model is
+ * appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Nondeterministically flush a dirty cache entry to memory (making it visible
+ * to the other caches), or do nothing.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
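+
+/*
+ * Illustration (editor's sketch, hypothetical variable "foo", not part of the
+ * model) of how the cache macros above are meant to be used: every access goes
+ * through the per-process cached copy, and a value only becomes globally
+ * visible once it is flushed to mem_foo.
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *     ...
+ *     WRITE_CACHED_VAR(foo, 1);               (updates this process' cache, sets dirty bit)
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     (flush: now visible in mem_foo)
+ *     tmp = READ_CACHED_VAR(foo);             (always reads the local cached copy)
+ *
+ * The smp_wmb()/smp_rmb()/ooo_mem() helpers below apply these flush/update
+ * operations to the model's actual variables.
+ */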
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by the writer busy-looping
+                * (waiting for the reader and sending barrier requests) while
+                * the reader keeps servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
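+
+/*
+ * Summary (editor's note) of the REMOTE_BARRIERS handshake modeled above:
+ *
+ *     writer: reader_barrier[i] = 1;
+ *             busy-wait until (reader_barrier[i] == 0);
+ *     reader: (reader_barrier[get_readerid()] == 1) -> smp_mb(i);
+ *             reader_barrier[get_readerid()] = 0;
+ *
+ * The reader is also free to ignore requests; the progress_ignoring_mb*
+ * labels keep the resulting cycles from being reported as non-progress errors.
+ */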
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the barrier
+                        * because the performance impact of adding a branch to skip it in the common
+                        * case would not justify the saving.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they fully order the
+        * generation pointer read wrt the active reader count read, which ensures
+        * execution will not spill across loop executions.
+        * However, if the mb()s are removed (execution using a signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one loop's
+        * execution from spilling into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are mainly
+                * interested in the writer's progress. The reader never blocks
+                * anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test the single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that our validation
+                * checks whether the data entry read is poisoned, it is OK if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops forever, let the writer also busy-loop
+        * through a progress label here so that, under weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.log
new file mode 100644 (file)
index 0000000..aaa4217
--- /dev/null
@@ -0,0 +1,440 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1294)
+Depth=    4900 States=    1e+06 Transitions= 2.37e+07 Memory=   550.334        t=   65.5 R=   2e+04
+Depth=    4973 States=    2e+06 Transitions=  4.8e+07 Memory=   634.221        t=    134 R=   1e+04
+Depth=    4973 States=    3e+06 Transitions= 7.25e+07 Memory=   718.205        t=    204 R=   1e+04
+pan: resizing hashtable to -w22..  done
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 4973, errors: 0
+  3841511 states, stored
+ 90254094 states, matched
+ 94095605 transitions (= stored+matched)
+1.5073578e+09 atomic steps
+hash conflicts:  63765708 (resolved)
+
+Stats on memory usage (in Megabytes):
+  424.972      equivalent memory usage for states (stored*(State-vector + overhead))
+  330.368      actual memory usage for states (compression: 77.74%)
+               state-vector as stored = 62 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  819.932      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 410, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 81, "(1)"
+       line 256, "pan.___", state 101, "(1)"
+       line 260, "pan.___", state 109, "(1)"
+       line 596, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 410, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 199, "(1)"
+       line 256, "pan.___", state 219, "(1)"
+       line 260, "pan.___", state 227, "(1)"
+       line 410, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 310, "(1)"
+       line 256, "pan.___", state 330, "(1)"
+       line 260, "pan.___", state 338, "(1)"
+       line 410, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 361, "(1)"
+       line 410, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 362, "else"
+       line 410, "pan.___", state 365, "(1)"
+       line 414, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 375, "(1)"
+       line 414, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 376, "else"
+       line 414, "pan.___", state 379, "(1)"
+       line 414, "pan.___", state 380, "(1)"
+       line 414, "pan.___", state 380, "(1)"
+       line 412, "pan.___", state 385, "((i<1))"
+       line 412, "pan.___", state 385, "((i>=1))"
+       line 419, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 393, "(1)"
+       line 419, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 394, "else"
+       line 419, "pan.___", state 397, "(1)"
+       line 419, "pan.___", state 398, "(1)"
+       line 419, "pan.___", state 398, "(1)"
+       line 423, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 407, "(1)"
+       line 423, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 408, "else"
+       line 423, "pan.___", state 411, "(1)"
+       line 423, "pan.___", state 412, "(1)"
+       line 423, "pan.___", state 412, "(1)"
+       line 421, "pan.___", state 417, "((i<2))"
+       line 421, "pan.___", state 417, "((i>=2))"
+       line 248, "pan.___", state 423, "(1)"
+       line 252, "pan.___", state 431, "(1)"
+       line 252, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 432, "else"
+       line 250, "pan.___", state 437, "((i<1))"
+       line 250, "pan.___", state 437, "((i>=1))"
+       line 256, "pan.___", state 443, "(1)"
+       line 256, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 444, "else"
+       line 260, "pan.___", state 451, "(1)"
+       line 260, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, "pan.___", state 452, "else"
+       line 258, "pan.___", state 457, "((i<2))"
+       line 258, "pan.___", state 457, "((i>=2))"
+       line 265, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, "pan.___", state 461, "else"
+       line 430, "pan.___", state 463, "(1)"
+       line 430, "pan.___", state 463, "(1)"
+       line 596, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 596, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 596, "pan.___", state 468, "(1)"
+       line 271, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 483, "(1)"
+       line 279, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 519, "(1)"
+       line 252, "pan.___", state 527, "(1)"
+       line 256, "pan.___", state 539, "(1)"
+       line 260, "pan.___", state 547, "(1)"
+       line 410, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 629, "(1)"
+       line 252, "pan.___", state 637, "(1)"
+       line 256, "pan.___", state 649, "(1)"
+       line 260, "pan.___", state 657, "(1)"
+       line 410, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 747, "(1)"
+       line 256, "pan.___", state 767, "(1)"
+       line 260, "pan.___", state 775, "(1)"
+       line 410, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 796, "(1)"
+       line 410, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 797, "else"
+       line 410, "pan.___", state 800, "(1)"
+       line 414, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 810, "(1)"
+       line 414, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 811, "else"
+       line 414, "pan.___", state 814, "(1)"
+       line 414, "pan.___", state 815, "(1)"
+       line 414, "pan.___", state 815, "(1)"
+       line 412, "pan.___", state 820, "((i<1))"
+       line 412, "pan.___", state 820, "((i>=1))"
+       line 419, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 828, "(1)"
+       line 419, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 829, "else"
+       line 419, "pan.___", state 832, "(1)"
+       line 419, "pan.___", state 833, "(1)"
+       line 419, "pan.___", state 833, "(1)"
+       line 423, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 842, "(1)"
+       line 423, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 843, "else"
+       line 423, "pan.___", state 846, "(1)"
+       line 423, "pan.___", state 847, "(1)"
+       line 423, "pan.___", state 847, "(1)"
+       line 421, "pan.___", state 852, "((i<2))"
+       line 421, "pan.___", state 852, "((i>=2))"
+       line 248, "pan.___", state 858, "(1)"
+       line 252, "pan.___", state 866, "(1)"
+       line 252, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 867, "else"
+       line 250, "pan.___", state 872, "((i<1))"
+       line 250, "pan.___", state 872, "((i>=1))"
+       line 256, "pan.___", state 878, "(1)"
+       line 256, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 879, "else"
+       line 260, "pan.___", state 886, "(1)"
+       line 260, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, "pan.___", state 887, "else"
+       line 258, "pan.___", state 892, "((i<2))"
+       line 258, "pan.___", state 892, "((i>=2))"
+       line 265, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, "pan.___", state 896, "else"
+       line 430, "pan.___", state 898, "(1)"
+       line 430, "pan.___", state 898, "(1)"
+       line 604, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 410, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 971, "(1)"
+       line 252, "pan.___", state 979, "(1)"
+       line 256, "pan.___", state 991, "(1)"
+       line 260, "pan.___", state 999, "(1)"
+       line 410, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1085, "(1)"
+       line 256, "pan.___", state 1105, "(1)"
+       line 260, "pan.___", state 1113, "(1)"
+       line 410, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1200, "(1)"
+       line 256, "pan.___", state 1220, "(1)"
+       line 260, "pan.___", state 1228, "(1)"
+       line 410, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1311, "(1)"
+       line 256, "pan.___", state 1331, "(1)"
+       line 260, "pan.___", state 1339, "(1)"
+       line 271, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1407, "(1)"
+       line 252, "pan.___", state 1415, "(1)"
+       line 256, "pan.___", state 1427, "(1)"
+       line 260, "pan.___", state 1435, "(1)"
+       line 410, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1517, "(1)"
+       line 252, "pan.___", state 1525, "(1)"
+       line 256, "pan.___", state 1537, "(1)"
+       line 260, "pan.___", state 1545, "(1)"
+       line 410, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1628, "(1)"
+       line 252, "pan.___", state 1636, "(1)"
+       line 256, "pan.___", state 1648, "(1)"
+       line 260, "pan.___", state 1656, "(1)"
+       line 410, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1742, "(1)"
+       line 256, "pan.___", state 1762, "(1)"
+       line 260, "pan.___", state 1770, "(1)"
+       line 643, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 410, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1860, "(1)"
+       line 256, "pan.___", state 1880, "(1)"
+       line 260, "pan.___", state 1888, "(1)"
+       line 410, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1971, "(1)"
+       line 256, "pan.___", state 1991, "(1)"
+       line 260, "pan.___", state 1999, "(1)"
+       line 410, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2022, "(1)"
+       line 410, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 2023, "else"
+       line 410, "pan.___", state 2026, "(1)"
+       line 414, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2036, "(1)"
+       line 414, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2037, "else"
+       line 414, "pan.___", state 2040, "(1)"
+       line 414, "pan.___", state 2041, "(1)"
+       line 414, "pan.___", state 2041, "(1)"
+       line 412, "pan.___", state 2046, "((i<1))"
+       line 412, "pan.___", state 2046, "((i>=1))"
+       line 419, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2054, "(1)"
+       line 419, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 419, "pan.___", state 2055, "else"
+       line 419, "pan.___", state 2058, "(1)"
+       line 419, "pan.___", state 2059, "(1)"
+       line 419, "pan.___", state 2059, "(1)"
+       line 423, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2068, "(1)"
+       line 423, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 423, "pan.___", state 2069, "else"
+       line 423, "pan.___", state 2072, "(1)"
+       line 423, "pan.___", state 2073, "(1)"
+       line 423, "pan.___", state 2073, "(1)"
+       line 421, "pan.___", state 2078, "((i<2))"
+       line 421, "pan.___", state 2078, "((i>=2))"
+       line 248, "pan.___", state 2084, "(1)"
+       line 252, "pan.___", state 2092, "(1)"
+       line 252, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 2093, "else"
+       line 250, "pan.___", state 2098, "((i<1))"
+       line 250, "pan.___", state 2098, "((i>=1))"
+       line 256, "pan.___", state 2104, "(1)"
+       line 256, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2105, "else"
+       line 260, "pan.___", state 2112, "(1)"
+       line 260, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 260, "pan.___", state 2113, "else"
+       line 258, "pan.___", state 2118, "((i<2))"
+       line 258, "pan.___", state 2118, "((i>=2))"
+       line 265, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 265, "pan.___", state 2122, "else"
+       line 430, "pan.___", state 2124, "(1)"
+       line 430, "pan.___", state 2124, "(1)"
+       line 643, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 643, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 643, "pan.___", state 2129, "(1)"
+       line 271, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2180, "(1)"
+       line 252, "pan.___", state 2188, "(1)"
+       line 256, "pan.___", state 2200, "(1)"
+       line 260, "pan.___", state 2208, "(1)"
+       line 410, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2290, "(1)"
+       line 252, "pan.___", state 2298, "(1)"
+       line 256, "pan.___", state 2310, "(1)"
+       line 260, "pan.___", state 2318, "(1)"
+       line 271, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2387, "(1)"
+       line 252, "pan.___", state 2395, "(1)"
+       line 256, "pan.___", state 2407, "(1)"
+       line 260, "pan.___", state 2415, "(1)"
+       line 410, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2497, "(1)"
+       line 252, "pan.___", state 2505, "(1)"
+       line 256, "pan.___", state 2517, "(1)"
+       line 260, "pan.___", state 2525, "(1)"
+       line 410, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 2608, "(1)"
+       line 252, "pan.___", state 2616, "(1)"
+       line 256, "pan.___", state 2628, "(1)"
+       line 260, "pan.___", state 2636, "(1)"
+       line 248, "pan.___", state 2667, "(1)"
+       line 256, "pan.___", state 2687, "(1)"
+       line 260, "pan.___", state 2695, "(1)"
+       line 248, "pan.___", state 2710, "(1)"
+       line 252, "pan.___", state 2718, "(1)"
+       line 256, "pan.___", state 2730, "(1)"
+       line 260, "pan.___", state 2738, "(1)"
+       line 897, "pan.___", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 410, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 82, "(1)"
+       line 252, "pan.___", state 90, "(1)"
+       line 256, "pan.___", state 102, "(1)"
+       line 271, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 257, "(1)"
+       line 252, "pan.___", state 265, "(1)"
+       line 256, "pan.___", state 277, "(1)"
+       line 260, "pan.___", state 285, "(1)"
+       line 414, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 378, "(1)"
+       line 256, "pan.___", state 390, "(1)"
+       line 260, "pan.___", state 398, "(1)"
+       line 414, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 499, "(1)"
+       line 256, "pan.___", state 511, "(1)"
+       line 260, "pan.___", state 519, "(1)"
+       line 414, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 610, "(1)"
+       line 256, "pan.___", state 622, "(1)"
+       line 260, "pan.___", state 630, "(1)"
+       line 414, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 419, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 423, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 252, "pan.___", state 723, "(1)"
+       line 256, "pan.___", state 735, "(1)"
+       line 260, "pan.___", state 743, "(1)"
+       line 271, "pan.___", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 820, "(1)"
+       line 283, "pan.___", state 827, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 843, "(1)"
+       line 252, "pan.___", state 851, "(1)"
+       line 256, "pan.___", state 863, "(1)"
+       line 260, "pan.___", state 871, "(1)"
+       line 275, "pan.___", state 896, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 909, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 918, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 934, "(1)"
+       line 252, "pan.___", state 942, "(1)"
+       line 256, "pan.___", state 954, "(1)"
+       line 260, "pan.___", state 962, "(1)"
+       line 275, "pan.___", state 987, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1000, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1009, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1025, "(1)"
+       line 252, "pan.___", state 1033, "(1)"
+       line 256, "pan.___", state 1045, "(1)"
+       line 260, "pan.___", state 1053, "(1)"
+       line 275, "pan.___", state 1078, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 279, "pan.___", state 1091, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1100, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 248, "pan.___", state 1116, "(1)"
+       line 252, "pan.___", state 1124, "(1)"
+       line 256, "pan.___", state 1136, "(1)"
+       line 260, "pan.___", state 1144, "(1)"
+       line 1236, "pan.___", state 1159, "-end-"
+       (71 of 1159 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1299, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 265 seconds
+pan: rate 14518.182 states/second
+pan: avg transition delay 2.812e-06 usec
+cp .input.spin urcu_free.spin.input
+cp .input.spin.trail urcu_free.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.ltl b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free.spin.input
new file mode 100644 (file)
index 0000000..3191ba1
--- /dev/null
@@ -0,0 +1,1272 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active inhibits instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
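+
+/*
+ * For illustration (a sketch using the writer model above): an instruction is
+ * guarded by CONSUME_TOKENS, with its input tokens as second argument and,
+ * typically, the token it produces as third argument (so it cannot run twice),
+ * and it publishes its own token with PRODUCE_TOKENS once its side-effects are
+ * done:
+ *
+ *	:: CONSUME_TOKENS(proc_urcu_writer,
+ *			  WRITE_DATA,		// input token required
+ *			  WRITE_PROC_WMB) ->	// not yet executed
+ *		smp_wmb(i);
+ *		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+ *
+ * CLEAR_TOKENS resets a group of tokens, e.g. to restart a busy-waiting loop
+ * or to clear the whole token word at the end of a pass.
+ */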
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can normally remove this dependency, but it cannot when the statements must
+ * write multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
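+
+/*
+ * Minimal sketch of the three data dependency types above, in terms of the
+ * cached variable accessors defined further down (illustrative only, with a
+ * hypothetical variable x and temporary r1):
+ *
+ *     WRITE_CACHED_VAR(x, 1);  r1 = READ_CACHED_VAR(x);   RAW: read sees the write
+ *     r1 = READ_CACHED_VAR(x); WRITE_CACHED_VAR(x, 2);    WAR: write must not overtake the read
+ *     WRITE_CACHED_VAR(x, 1);  WRITE_CACHED_VAR(x, 2);    WAW: second write must remain last
+ */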
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. See
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Nondeterministically write the cache line back to memory if it is dirty,
+ * or do nothing, so other caches may or may not observe the update.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
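+
+/*
+ * Illustrative sketch (not part of the verified model) of the cache
+ * life-cycle implemented by the macros above, for a hypothetical cached
+ * variable x accessed by process id p:
+ *
+ *     WRITE_CACHED_VAR(x, v);            update p's cached copy, mark it dirty
+ *     RANDOM_CACHE_WRITE_TO_MEM(x, p);   maybe flush the dirty copy to mem_x
+ *     CACHE_READ_FROM_MEM(x, p);         refresh p's copy from mem_x, only if clean
+ *     r = READ_CACHED_VAR(x);            always reads p's local cached copy
+ *
+ * The smp_wmb()/smp_rmb() helpers below force the write-back/read-refresh of
+ * every modeled variable, while ooo_mem() performs random write-backs (and,
+ * on Alpha, random cache reads) to model out-of-order memory effects.
+ */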
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
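+
+/*
+ * Note: in this configuration REMOTE_BARRIERS is left disabled (see the
+ * commented-out define above), so smp_mb_send() and smp_mb_reader() both map
+ * to a plain smp_mb() and smp_mb_recv() expands to nothing.
+ */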
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
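+
+/*
+ * Worked example of the bit layout above (for reference only): with
+ * READ_LOCK_BASE = 1, the first PROCEDURE_READ_LOCK body occupies bits 1 to 5
+ * (READ_PROD_A_READ << 1 up to READ_LOCK_OUT = 1 << 5), READ_PROC_FIRST_MB is
+ * bit 6, the nested lock uses bits 7 to 11, and so on up to
+ * READ_UNLOCK_OUT_UNROLL = 1 << 29. READ_PROC_ALL_TOKENS excludes the
+ * intermediate branch tokens, whereas READ_PROC_ALL_TOKENS_CLEAR
+ * ((1 << 30) - 1) covers every bit from 0 to 29, including them, so one full
+ * loop iteration resets the whole reader state.
+ */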
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because the performance impact of the branch needed to avoid
+                        * it in the common case does not justify removing it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution into the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
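+
+/*
+ * For reference: WRITE_PROC_ALL_TOKENS omits the two busy-wait branch tokens
+ * (WRITE_PROC_FIRST_WAIT_LOOP and WRITE_PROC_SECOND_WAIT_LOOP), while
+ * WRITE_PROC_ALL_TOKENS_CLEAR ((1 << 15) - 1) covers every bit from 0 to 14,
+ * so clearing it resets the whole writer state for the next loop iteration.
+ */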
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
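+                       /*
+                        * Wait condition below: the writer keeps looping
+                        * (WAIT_LOOP token) as long as reader 0 is inside a
+                        * read-side critical section (non-zero nest count in
+                        * the low-order bits) whose grace-period parity bit
+                        * differs from cur_gp_val; otherwise the reader is
+                        * quiescent for this phase and the WAIT token is
+                        * produced so the writer can proceed.
+                        */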
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_nested.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.log
new file mode 100644 (file)
index 0000000..4cf70c8
--- /dev/null
@@ -0,0 +1,639 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_mb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    8086 States=    1e+06 Transitions= 1.91e+07 Memory=   550.334        t=   51.2 R=   2e+04
+Depth=    8086 States=    2e+06 Transitions= 3.68e+07 Memory=   634.318        t=   98.4 R=   2e+04
+Depth=    8086 States=    3e+06 Transitions= 5.68e+07 Memory=   718.205        t=    153 R=   2e+04
+pan: resizing hashtable to -w22..  done
+Depth=    8086 States=    4e+06 Transitions= 7.01e+07 Memory=   833.213        t=    188 R=   2e+04
+Depth=    8086 States=    5e+06 Transitions= 8.42e+07 Memory=   917.197        t=    225 R=   2e+04
+Depth=    8086 States=    6e+06 Transitions= 9.87e+07 Memory=  1001.182        t=    263 R=   2e+04
+Depth=    8086 States=    7e+06 Transitions= 1.12e+08 Memory=  1085.166        t=    298 R=   2e+04
+Depth=    8086 States=    8e+06 Transitions= 1.28e+08 Memory=  1169.053        t=    342 R=   2e+04
+Depth=    8086 States=    9e+06 Transitions= 1.62e+08 Memory=  1253.037        t=    436 R=   2e+04
+pan: resizing hashtable to -w24..  done
+Depth=    8086 States=    1e+07 Transitions= 1.95e+08 Memory=  1461.018        t=    530 R=   2e+04
+Depth=   13700 States=  1.1e+07 Transitions= 2.31e+08 Memory=  1545.002        t=    629 R=   2e+04
+Depth=   13700 States=  1.2e+07 Transitions= 2.64e+08 Memory=  1628.986        t=    721 R=   2e+04
+Depth=   13700 States=  1.3e+07 Transitions= 2.89e+08 Memory=  1712.971        t=    789 R=   2e+04
+Depth=   13700 States=  1.4e+07 Transitions= 3.09e+08 Memory=  1796.955        t=    844 R=   2e+04
+Depth=   13700 States=  1.5e+07 Transitions= 3.29e+08 Memory=  1880.940        t=    897 R=   2e+04
+pan: claim violated! (at depth 1143)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 13700, errors: 1
+ 15925114 states, stored
+3.2589318e+08 states, matched
+3.418183e+08 transitions (= stored+matched)
+5.3108495e+09 atomic steps
+hash conflicts: 1.8673119e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1761.735      equivalent memory usage for states (stored*(State-vector + overhead))
+ 1373.571      actual memory usage for states (compression: 77.97%)
+               state-vector as stored = 62 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1958.576      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 21, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 53, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 67, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 85, "(1)"
+       line 257, "pan.___", state 105, "(1)"
+       line 261, "pan.___", state 113, "(1)"
+       line 603, "pan.___", state 132, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 139, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 171, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 185, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 203, "(1)"
+       line 257, "pan.___", state 223, "(1)"
+       line 261, "pan.___", state 231, "(1)"
+       line 411, "pan.___", state 250, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 282, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 296, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 314, "(1)"
+       line 257, "pan.___", state 334, "(1)"
+       line 261, "pan.___", state 342, "(1)"
+       line 411, "pan.___", state 363, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 365, "(1)"
+       line 411, "pan.___", state 366, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 366, "else"
+       line 411, "pan.___", state 369, "(1)"
+       line 415, "pan.___", state 377, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 379, "(1)"
+       line 415, "pan.___", state 380, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 380, "else"
+       line 415, "pan.___", state 383, "(1)"
+       line 415, "pan.___", state 384, "(1)"
+       line 415, "pan.___", state 384, "(1)"
+       line 413, "pan.___", state 389, "((i<1))"
+       line 413, "pan.___", state 389, "((i>=1))"
+       line 420, "pan.___", state 395, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 397, "(1)"
+       line 420, "pan.___", state 398, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 398, "else"
+       line 420, "pan.___", state 401, "(1)"
+       line 420, "pan.___", state 402, "(1)"
+       line 420, "pan.___", state 402, "(1)"
+       line 424, "pan.___", state 409, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 411, "(1)"
+       line 424, "pan.___", state 412, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 412, "else"
+       line 424, "pan.___", state 415, "(1)"
+       line 424, "pan.___", state 416, "(1)"
+       line 424, "pan.___", state 416, "(1)"
+       line 422, "pan.___", state 421, "((i<2))"
+       line 422, "pan.___", state 421, "((i>=2))"
+       line 249, "pan.___", state 427, "(1)"
+       line 253, "pan.___", state 435, "(1)"
+       line 253, "pan.___", state 436, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 436, "else"
+       line 251, "pan.___", state 441, "((i<1))"
+       line 251, "pan.___", state 441, "((i>=1))"
+       line 257, "pan.___", state 447, "(1)"
+       line 257, "pan.___", state 448, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 448, "else"
+       line 261, "pan.___", state 455, "(1)"
+       line 261, "pan.___", state 456, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 456, "else"
+       line 259, "pan.___", state 461, "((i<2))"
+       line 259, "pan.___", state 461, "((i>=2))"
+       line 266, "pan.___", state 465, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 465, "else"
+       line 431, "pan.___", state 467, "(1)"
+       line 431, "pan.___", state 467, "(1)"
+       line 603, "pan.___", state 470, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 603, "pan.___", state 471, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 603, "pan.___", state 472, "(1)"
+       line 272, "pan.___", state 476, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 478, "(1)"
+       line 276, "pan.___", state 485, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 487, "(1)"
+       line 276, "pan.___", state 488, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 488, "else"
+       line 274, "pan.___", state 493, "((i<1))"
+       line 274, "pan.___", state 493, "((i>=1))"
+       line 280, "pan.___", state 498, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 500, "(1)"
+       line 280, "pan.___", state 501, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 501, "else"
+       line 284, "pan.___", state 507, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 509, "(1)"
+       line 284, "pan.___", state 510, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 510, "else"
+       line 282, "pan.___", state 515, "((i<2))"
+       line 282, "pan.___", state 515, "((i>=2))"
+       line 249, "pan.___", state 523, "(1)"
+       line 253, "pan.___", state 531, "(1)"
+       line 253, "pan.___", state 532, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 532, "else"
+       line 251, "pan.___", state 537, "((i<1))"
+       line 251, "pan.___", state 537, "((i>=1))"
+       line 257, "pan.___", state 543, "(1)"
+       line 257, "pan.___", state 544, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 544, "else"
+       line 261, "pan.___", state 551, "(1)"
+       line 261, "pan.___", state 552, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 552, "else"
+       line 266, "pan.___", state 561, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 561, "else"
+       line 299, "pan.___", state 563, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 563, "else"
+       line 411, "pan.___", state 569, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 601, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 615, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 633, "(1)"
+       line 257, "pan.___", state 653, "(1)"
+       line 261, "pan.___", state 661, "(1)"
+       line 411, "pan.___", state 687, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 719, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 733, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 751, "(1)"
+       line 257, "pan.___", state 771, "(1)"
+       line 261, "pan.___", state 779, "(1)"
+       line 411, "pan.___", state 798, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 800, "(1)"
+       line 411, "pan.___", state 801, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 801, "else"
+       line 411, "pan.___", state 804, "(1)"
+       line 415, "pan.___", state 812, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 814, "(1)"
+       line 415, "pan.___", state 815, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 815, "else"
+       line 415, "pan.___", state 818, "(1)"
+       line 415, "pan.___", state 819, "(1)"
+       line 415, "pan.___", state 819, "(1)"
+       line 413, "pan.___", state 824, "((i<1))"
+       line 413, "pan.___", state 824, "((i>=1))"
+       line 420, "pan.___", state 830, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 832, "(1)"
+       line 420, "pan.___", state 833, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 833, "else"
+       line 420, "pan.___", state 836, "(1)"
+       line 420, "pan.___", state 837, "(1)"
+       line 420, "pan.___", state 837, "(1)"
+       line 424, "pan.___", state 844, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 846, "(1)"
+       line 424, "pan.___", state 847, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 847, "else"
+       line 424, "pan.___", state 850, "(1)"
+       line 424, "pan.___", state 851, "(1)"
+       line 424, "pan.___", state 851, "(1)"
+       line 422, "pan.___", state 856, "((i<2))"
+       line 422, "pan.___", state 856, "((i>=2))"
+       line 249, "pan.___", state 862, "(1)"
+       line 253, "pan.___", state 870, "(1)"
+       line 253, "pan.___", state 871, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 871, "else"
+       line 251, "pan.___", state 876, "((i<1))"
+       line 251, "pan.___", state 876, "((i>=1))"
+       line 257, "pan.___", state 882, "(1)"
+       line 257, "pan.___", state 883, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 883, "else"
+       line 261, "pan.___", state 890, "(1)"
+       line 261, "pan.___", state 891, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 891, "else"
+       line 259, "pan.___", state 896, "((i<2))"
+       line 259, "pan.___", state 896, "((i>=2))"
+       line 266, "pan.___", state 900, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 900, "else"
+       line 431, "pan.___", state 902, "(1)"
+       line 431, "pan.___", state 902, "(1)"
+       line 611, "pan.___", state 906, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 911, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 943, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 957, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 975, "(1)"
+       line 257, "pan.___", state 995, "(1)"
+       line 261, "pan.___", state 1003, "(1)"
+       line 411, "pan.___", state 1025, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1057, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1071, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1089, "(1)"
+       line 257, "pan.___", state 1109, "(1)"
+       line 261, "pan.___", state 1117, "(1)"
+       line 411, "pan.___", state 1140, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1172, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1186, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1204, "(1)"
+       line 257, "pan.___", state 1224, "(1)"
+       line 261, "pan.___", state 1232, "(1)"
+       line 411, "pan.___", state 1251, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1283, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1297, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1315, "(1)"
+       line 257, "pan.___", state 1335, "(1)"
+       line 261, "pan.___", state 1343, "(1)"
+       line 272, "pan.___", state 1364, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1366, "(1)"
+       line 276, "pan.___", state 1373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1375, "(1)"
+       line 276, "pan.___", state 1376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1376, "else"
+       line 274, "pan.___", state 1381, "((i<1))"
+       line 274, "pan.___", state 1381, "((i>=1))"
+       line 280, "pan.___", state 1386, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1388, "(1)"
+       line 280, "pan.___", state 1389, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1389, "else"
+       line 284, "pan.___", state 1395, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1397, "(1)"
+       line 284, "pan.___", state 1398, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1398, "else"
+       line 282, "pan.___", state 1403, "((i<2))"
+       line 282, "pan.___", state 1403, "((i>=2))"
+       line 249, "pan.___", state 1411, "(1)"
+       line 253, "pan.___", state 1419, "(1)"
+       line 253, "pan.___", state 1420, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1420, "else"
+       line 251, "pan.___", state 1425, "((i<1))"
+       line 251, "pan.___", state 1425, "((i>=1))"
+       line 257, "pan.___", state 1431, "(1)"
+       line 257, "pan.___", state 1432, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1432, "else"
+       line 261, "pan.___", state 1439, "(1)"
+       line 261, "pan.___", state 1440, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1440, "else"
+       line 266, "pan.___", state 1449, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1449, "else"
+       line 299, "pan.___", state 1451, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1451, "else"
+       line 411, "pan.___", state 1457, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1489, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1521, "(1)"
+       line 257, "pan.___", state 1541, "(1)"
+       line 261, "pan.___", state 1549, "(1)"
+       line 411, "pan.___", state 1568, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1600, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1614, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1632, "(1)"
+       line 257, "pan.___", state 1652, "(1)"
+       line 261, "pan.___", state 1660, "(1)"
+       line 411, "pan.___", state 1682, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1714, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1728, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1746, "(1)"
+       line 257, "pan.___", state 1766, "(1)"
+       line 261, "pan.___", state 1774, "(1)"
+       line 650, "pan.___", state 1793, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1800, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1832, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1846, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1864, "(1)"
+       line 257, "pan.___", state 1884, "(1)"
+       line 261, "pan.___", state 1892, "(1)"
+       line 411, "pan.___", state 1911, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1943, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1957, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1975, "(1)"
+       line 257, "pan.___", state 1995, "(1)"
+       line 261, "pan.___", state 2003, "(1)"
+       line 411, "pan.___", state 2024, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2026, "(1)"
+       line 411, "pan.___", state 2027, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2027, "else"
+       line 411, "pan.___", state 2030, "(1)"
+       line 415, "pan.___", state 2038, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2040, "(1)"
+       line 415, "pan.___", state 2041, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2041, "else"
+       line 415, "pan.___", state 2044, "(1)"
+       line 415, "pan.___", state 2045, "(1)"
+       line 415, "pan.___", state 2045, "(1)"
+       line 413, "pan.___", state 2050, "((i<1))"
+       line 413, "pan.___", state 2050, "((i>=1))"
+       line 420, "pan.___", state 2056, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2059, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2059, "else"
+       line 420, "pan.___", state 2062, "(1)"
+       line 420, "pan.___", state 2063, "(1)"
+       line 420, "pan.___", state 2063, "(1)"
+       line 424, "pan.___", state 2070, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2072, "(1)"
+       line 424, "pan.___", state 2073, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2073, "else"
+       line 424, "pan.___", state 2076, "(1)"
+       line 424, "pan.___", state 2077, "(1)"
+       line 424, "pan.___", state 2077, "(1)"
+       line 422, "pan.___", state 2082, "((i<2))"
+       line 422, "pan.___", state 2082, "((i>=2))"
+       line 249, "pan.___", state 2088, "(1)"
+       line 253, "pan.___", state 2096, "(1)"
+       line 253, "pan.___", state 2097, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2097, "else"
+       line 251, "pan.___", state 2102, "((i<1))"
+       line 251, "pan.___", state 2102, "((i>=1))"
+       line 257, "pan.___", state 2108, "(1)"
+       line 257, "pan.___", state 2109, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2109, "else"
+       line 261, "pan.___", state 2116, "(1)"
+       line 261, "pan.___", state 2117, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2117, "else"
+       line 259, "pan.___", state 2122, "((i<2))"
+       line 259, "pan.___", state 2122, "((i>=2))"
+       line 266, "pan.___", state 2126, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2126, "else"
+       line 431, "pan.___", state 2128, "(1)"
+       line 431, "pan.___", state 2128, "(1)"
+       line 650, "pan.___", state 2131, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 650, "pan.___", state 2132, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 650, "pan.___", state 2133, "(1)"
+       line 272, "pan.___", state 2137, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 2139, "(1)"
+       line 276, "pan.___", state 2146, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2148, "(1)"
+       line 276, "pan.___", state 2149, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 2149, "else"
+       line 274, "pan.___", state 2154, "((i<1))"
+       line 274, "pan.___", state 2154, "((i>=1))"
+       line 280, "pan.___", state 2159, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2161, "(1)"
+       line 280, "pan.___", state 2162, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 2162, "else"
+       line 284, "pan.___", state 2168, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2170, "(1)"
+       line 284, "pan.___", state 2171, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 2171, "else"
+       line 282, "pan.___", state 2176, "((i<2))"
+       line 282, "pan.___", state 2176, "((i>=2))"
+       line 249, "pan.___", state 2184, "(1)"
+       line 253, "pan.___", state 2192, "(1)"
+       line 253, "pan.___", state 2193, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2193, "else"
+       line 251, "pan.___", state 2198, "((i<1))"
+       line 251, "pan.___", state 2198, "((i>=1))"
+       line 257, "pan.___", state 2204, "(1)"
+       line 257, "pan.___", state 2205, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2205, "else"
+       line 261, "pan.___", state 2212, "(1)"
+       line 261, "pan.___", state 2213, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2213, "else"
+       line 266, "pan.___", state 2222, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2222, "else"
+       line 299, "pan.___", state 2224, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 2224, "else"
+       line 411, "pan.___", state 2230, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2262, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2276, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2294, "(1)"
+       line 257, "pan.___", state 2314, "(1)"
+       line 261, "pan.___", state 2322, "(1)"
+       line 272, "pan.___", state 2344, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 2346, "(1)"
+       line 276, "pan.___", state 2353, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2355, "(1)"
+       line 276, "pan.___", state 2356, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 2356, "else"
+       line 274, "pan.___", state 2361, "((i<1))"
+       line 274, "pan.___", state 2361, "((i>=1))"
+       line 280, "pan.___", state 2366, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2368, "(1)"
+       line 280, "pan.___", state 2369, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 2369, "else"
+       line 284, "pan.___", state 2375, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2377, "(1)"
+       line 284, "pan.___", state 2378, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 2378, "else"
+       line 282, "pan.___", state 2383, "((i<2))"
+       line 282, "pan.___", state 2383, "((i>=2))"
+       line 249, "pan.___", state 2391, "(1)"
+       line 253, "pan.___", state 2399, "(1)"
+       line 253, "pan.___", state 2400, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2400, "else"
+       line 251, "pan.___", state 2405, "((i<1))"
+       line 251, "pan.___", state 2405, "((i>=1))"
+       line 257, "pan.___", state 2411, "(1)"
+       line 257, "pan.___", state 2412, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2412, "else"
+       line 261, "pan.___", state 2419, "(1)"
+       line 261, "pan.___", state 2420, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2420, "else"
+       line 266, "pan.___", state 2429, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2429, "else"
+       line 299, "pan.___", state 2431, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 2431, "else"
+       line 411, "pan.___", state 2437, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2469, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2483, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2501, "(1)"
+       line 257, "pan.___", state 2521, "(1)"
+       line 261, "pan.___", state 2529, "(1)"
+       line 411, "pan.___", state 2548, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2580, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2594, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2612, "(1)"
+       line 257, "pan.___", state 2632, "(1)"
+       line 261, "pan.___", state 2640, "(1)"
+       line 249, "pan.___", state 2671, "(1)"
+       line 257, "pan.___", state 2691, "(1)"
+       line 261, "pan.___", state 2699, "(1)"
+       line 249, "pan.___", state 2714, "(1)"
+       line 257, "pan.___", state 2734, "(1)"
+       line 261, "pan.___", state 2742, "(1)"
+       line 898, "pan.___", state 2759, "-end-"
+       (306 of 2759 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 20, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 26, "(1)"
+       line 415, "pan.___", state 34, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 40, "(1)"
+       line 415, "pan.___", state 41, "(1)"
+       line 415, "pan.___", state 41, "(1)"
+       line 413, "pan.___", state 46, "((i<1))"
+       line 413, "pan.___", state 46, "((i>=1))"
+       line 420, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 58, "(1)"
+       line 420, "pan.___", state 59, "(1)"
+       line 420, "pan.___", state 59, "(1)"
+       line 424, "pan.___", state 72, "(1)"
+       line 424, "pan.___", state 73, "(1)"
+       line 424, "pan.___", state 73, "(1)"
+       line 422, "pan.___", state 78, "((i<2))"
+       line 422, "pan.___", state 78, "((i>=2))"
+       line 249, "pan.___", state 84, "(1)"
+       line 253, "pan.___", state 92, "(1)"
+       line 253, "pan.___", state 93, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 93, "else"
+       line 251, "pan.___", state 98, "((i<1))"
+       line 251, "pan.___", state 98, "((i>=1))"
+       line 257, "pan.___", state 104, "(1)"
+       line 257, "pan.___", state 105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 105, "else"
+       line 261, "pan.___", state 112, "(1)"
+       line 261, "pan.___", state 113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 113, "else"
+       line 259, "pan.___", state 118, "((i<2))"
+       line 259, "pan.___", state 118, "((i>=2))"
+       line 266, "pan.___", state 122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 122, "else"
+       line 431, "pan.___", state 124, "(1)"
+       line 431, "pan.___", state 124, "(1)"
+       line 272, "pan.___", state 133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 142, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 150, "((i<1))"
+       line 274, "pan.___", state 150, "((i>=1))"
+       line 280, "pan.___", state 155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 1021, "pan.___", state 183, "old_data = cached_rcu_ptr.val[_pid]"
+       line 1032, "pan.___", state 187, "_proc_urcu_writer = (_proc_urcu_writer|(1<<4))"
+       line 411, "pan.___", state 195, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 201, "(1)"
+       line 415, "pan.___", state 209, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 215, "(1)"
+       line 415, "pan.___", state 216, "(1)"
+       line 415, "pan.___", state 216, "(1)"
+       line 420, "pan.___", state 229, "(1)"
+       line 424, "pan.___", state 241, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 259, "(1)"
+       line 253, "pan.___", state 267, "(1)"
+       line 261, "pan.___", state 287, "(1)"
+       line 431, "pan.___", state 299, "(1)"
+       line 431, "pan.___", state 299, "(1)"
+       line 415, "pan.___", state 322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 380, "(1)"
+       line 261, "pan.___", state 400, "(1)"
+       line 415, "pan.___", state 443, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 501, "(1)"
+       line 415, "pan.___", state 554, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 612, "(1)"
+       line 415, "pan.___", state 667, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 699, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 725, "(1)"
+       line 261, "pan.___", state 745, "(1)"
+       line 1168, "pan.___", state 770, "_proc_urcu_writer = (_proc_urcu_writer|(1<<13))"
+       line 272, "pan.___", state 798, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 800, "(1)"
+       line 276, "pan.___", state 807, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 809, "(1)"
+       line 276, "pan.___", state 810, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 810, "else"
+       line 274, "pan.___", state 815, "((i<1))"
+       line 274, "pan.___", state 815, "((i>=1))"
+       line 280, "pan.___", state 820, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 822, "(1)"
+       line 280, "pan.___", state 823, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 823, "else"
+       line 284, "pan.___", state 829, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 831, "(1)"
+       line 284, "pan.___", state 832, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 832, "else"
+       line 282, "pan.___", state 837, "((i<2))"
+       line 282, "pan.___", state 837, "((i>=2))"
+       line 249, "pan.___", state 845, "(1)"
+       line 253, "pan.___", state 853, "(1)"
+       line 253, "pan.___", state 854, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 854, "else"
+       line 251, "pan.___", state 859, "((i<1))"
+       line 251, "pan.___", state 859, "((i>=1))"
+       line 257, "pan.___", state 865, "(1)"
+       line 257, "pan.___", state 866, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 866, "else"
+       line 261, "pan.___", state 873, "(1)"
+       line 261, "pan.___", state 874, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 874, "else"
+       line 266, "pan.___", state 883, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 883, "else"
+       line 299, "pan.___", state 885, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 885, "else"
+       line 276, "pan.___", state 898, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 911, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 920, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 936, "(1)"
+       line 253, "pan.___", state 944, "(1)"
+       line 257, "pan.___", state 956, "(1)"
+       line 261, "pan.___", state 964, "(1)"
+       line 276, "pan.___", state 989, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1011, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1027, "(1)"
+       line 253, "pan.___", state 1035, "(1)"
+       line 257, "pan.___", state 1047, "(1)"
+       line 261, "pan.___", state 1055, "(1)"
+       line 272, "pan.___", state 1071, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 1073, "(1)"
+       line 276, "pan.___", state 1080, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 1082, "(1)"
+       line 276, "pan.___", state 1083, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 1083, "else"
+       line 274, "pan.___", state 1088, "((i<1))"
+       line 274, "pan.___", state 1088, "((i>=1))"
+       line 280, "pan.___", state 1093, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1095, "(1)"
+       line 280, "pan.___", state 1096, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1096, "else"
+       line 284, "pan.___", state 1102, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1104, "(1)"
+       line 284, "pan.___", state 1105, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1105, "else"
+       line 282, "pan.___", state 1110, "((i<2))"
+       line 282, "pan.___", state 1110, "((i>=2))"
+       line 249, "pan.___", state 1118, "(1)"
+       line 253, "pan.___", state 1126, "(1)"
+       line 253, "pan.___", state 1127, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1127, "else"
+       line 251, "pan.___", state 1132, "((i<1))"
+       line 251, "pan.___", state 1132, "((i>=1))"
+       line 257, "pan.___", state 1138, "(1)"
+       line 257, "pan.___", state 1139, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1139, "else"
+       line 261, "pan.___", state 1146, "(1)"
+       line 261, "pan.___", state 1147, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1147, "else"
+       line 266, "pan.___", state 1156, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1156, "else"
+       line 299, "pan.___", state 1158, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1158, "else"
+       line 1237, "pan.___", state 1161, "-end-"
+       (113 of 1161 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 933 seconds
+pan: rate 17076.039 states/second
+pan: avg transition delay 2.7284e-06 usec
+cp .input.spin urcu_free_no_mb.spin.input
+cp .input.spin.trail urcu_free_no_mb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.spin.input
new file mode 100644 (file)
index 0000000..a7ea865
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_MB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
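+
+/*
+ * Illustrative sketch (not part of the model): a reader's per-CPU counter
+ * packs its nesting count in the low bits (RCU_GP_CTR_NEST_MASK) and the
+ * grace-period phase in RCU_GP_CTR_BIT. Roughly, a read-side lock could then
+ * test for the outermost nesting level and snapshot the global phase:
+ *
+ *     tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+ *     if
+ *     :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->   // outermost level
+ *             WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
+ *                              READ_CACHED_VAR(urcu_gp_ctr));
+ *     :: else ->                              // nested level
+ *             WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1);
+ *     fi;
+ */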
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes both instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
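+/*
+ * Illustrative sketch (not part of the model): each modelled instruction is
+ * guarded by CONSUME_TOKENS() on the tokens of the instructions it depends
+ * on, and produces its own token once executed; a final branch clears the
+ * tokens when the whole group has run. For example:
+ *
+ *     do
+ *     :: CONSUME_TOKENS(proc_tokens, TOK_LOAD, TOK_STORE) ->
+ *             // the load ran, the store did not yet: execute the store
+ *             ooo_mem(i);
+ *             WRITE_CACHED_VAR(rcu_ptr, new_ptr);
+ *             PRODUCE_TOKENS(proc_tokens, TOK_STORE);
+ *     :: CONSUME_TOKENS(proc_tokens, TOK_LOAD | TOK_STORE, 0) ->
+ *             CLEAR_TOKENS(proc_tokens, TOK_LOAD | TOK_STORE);
+ *             break;
+ *     od;
+ *
+ * proc_tokens, TOK_LOAD, TOK_STORE and new_ptr are hypothetical names used
+ * only for this example.
+ */
+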
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it must be kept when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order
+ * volatile accesses so they appear in the right order on a given CPU, but they
+ * can still be reordered by CPU instruction scheduling. This therefore cannot
+ * be considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
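+
+/*
+ * Illustrative sketch (not part of the model) of the data dependencies above,
+ * on plain assignments:
+ *
+ *     a = b + 1;      // (1)
+ *     c = a + 1;      // (2) RAW on 'a': must see the result of (1)
+ *     b = 5;          // (3) WAR on 'b': (1) must read 'b' before (3) writes
+ *     c = 7;          // (4) WAW on 'c': the writes of (2) and (4) stay ordered
+ *
+ * A control dependency would be, e.g., a store placed inside an
+ * "if :: cond -> ... fi" branch, which may only execute once the branch
+ * condition has been evaluated.
+ */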
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between iterations. To see the
+ * effect of loop unrolling, loops must be unrolled manually. Note that if
+ * loops end or start with a core-synchronizing instruction, the model is
+ * appropriate. Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
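+
+/*
+ * Illustrative sketch (not part of the model): a variable declared with
+ * DECLARE_CACHED_VAR() is read and written through the per-process cache,
+ * and only becomes visible to other processes once its dirty entry is
+ * written back, either randomly or by a barrier. For example:
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);              // hypothetical variable
+ *
+ *     WRITE_CACHED_VAR(foo, 1);                   // marks foo dirty locally
+ *     RANDOM_CACHE_WRITE_TO_MEM(foo, get_pid());  // may or may not flush it
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());         // forces the writeback
+ *
+ * "foo" is a hypothetical name used only for this example.
+ */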
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * make the model incomplete.
+ * Instead, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * barrier signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
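+
+/*
+ * Illustrative sketch (not part of the model): with REMOTE_BARRIERS, the
+ * writer requests barriers with smp_mb_send(), and a single chosen read-side
+ * instruction per verification run services them with smp_mb_recv():
+ *
+ *     // read side, only at the instruction chosen for this run:
+ *     smp_mb_recv(i, j);
+ *     tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+ *
+ *     // write side:
+ *     smp_mb_send(i, j, 1);   // smp_mb() plus wait for acknowledgement
+ *
+ * Selecting which instruction listens, one run at a time, models the fact
+ * that the reader may ignore barrier requests everywhere else.
+ */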
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
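+
+/*
+ * Roughly, ooo_mem() models weakly-ordered memory: for each cached variable
+ * it nondeterministically decides whether to flush the local (per-process)
+ * cached value to memory and, when HAVE_OOO_CACHE_READ is defined, whether to
+ * refresh the local copy from memory (see the RANDOM_CACHE_* primitives
+ * defined earlier). Placing ooo_mem() between accesses therefore lets the
+ * verifier explore the reorderings a weakly-ordered architecture could
+ * produce.
+ */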
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
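+
+/*
+ * In short, PROCEDURE_READ_LOCK models rcu_read_lock(): read the per-reader
+ * nesting counter (urcu_active_readers); if the nesting mask is clear
+ * (outermost lock), snapshot the global urcu_gp_ctr into the per-reader
+ * counter, otherwise simply increment the nesting count. The "base" parameter
+ * shifts the token bits so several instances of the procedure can coexist in
+ * the same token word.
+ */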
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
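+
+/*
+ * In short, PROCEDURE_READ_UNLOCK models rcu_read_unlock(): re-read the
+ * per-reader counter and store it back decremented by one, dropping one
+ * nesting level.
+ */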
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
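+
+/*
+ * The reader body below is written as a dataflow graph rather than as
+ * sequential code: roughly, each instruction is guarded by a CONSUME_TOKENS()
+ * condition requiring its dependency tokens to already be produced (and its
+ * own output token not yet produced), and ends with PRODUCE_TOKENS(). Any
+ * instruction whose dependencies are satisfied may fire, so the verifier
+ * explores every ordering allowed by the declared data and control
+ * dependencies, which is how out-of-order execution of the read-side critical
+ * section is modeled.
+ */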
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute at points
+                * where the instructions already executed form a prefix of the
+                * program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier because avoiding it would require a branch whose cost in the
+                        * common case is not justified.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop iterations.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() to smp_mb()), nothing prevents one loop
+        * iteration from spilling its execution into another iteration.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
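+
+/*
+ * To recap: one urcu_one_read() iteration models two consecutive read-side
+ * critical sections. The first takes a nested rcu_read_lock(), reads rcu_ptr,
+ * dereferences the corresponding rcu_data[] entry, then releases both nesting
+ * levels; the second (unrolled) critical section does the same with a single
+ * nesting level. The read-side memory barriers (READ_PROC_*_MB) separate the
+ * phases.
+ */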
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
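+
+/*
+ * The writer tokens above encode one update cycle: write new data into a free
+ * slab entry, wmb, exchange rcu_ptr (publication), first full barrier, then
+ * two grace-period flips (read urcu_gp_ctr, flip the parity bit, wait for
+ * reader 0 to be either quiescent or on the new parity), a second full
+ * barrier, and finally poisoning of the old data entry (WRITE_FREE), which
+ * models freeing the memory.
+ */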
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add nonexistent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting for
+                        * the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second read,
+                        * done as a prefetch. Note that all instructions with
+                        * side-effects depending on WRITE_PROC_SECOND_READ_GP
+                        * should also depend on completion of this busy-waiting
+                        * loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also busy-loop
+        * with a progress label here so that, with weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.spin.input.trail b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_mb.spin.input.trail
new file mode 100644 (file)
index 0000000..39bdf85
--- /dev/null
@@ -0,0 +1,1146 @@
+-2:3:-2
+-4:-4:-4
+1:0:4000
+2:3:3920
+3:3:3923
+4:3:3923
+5:3:3926
+6:3:3934
+7:3:3934
+8:3:3937
+9:3:3943
+10:3:3947
+11:3:3947
+12:3:3950
+13:3:3960
+14:3:3968
+15:3:3968
+16:3:3971
+17:3:3977
+18:3:3981
+19:3:3981
+20:3:3984
+21:3:3990
+22:3:3994
+23:3:3995
+24:0:4000
+25:3:3997
+26:0:4000
+27:2:2761
+28:0:4000
+29:2:2767
+30:0:4000
+31:2:2768
+32:0:4000
+33:2:2770
+34:0:4000
+35:2:2771
+36:0:4000
+37:2:2772
+38:0:4000
+39:2:2773
+40:0:4000
+41:2:2774
+42:2:2775
+43:2:2779
+44:2:2780
+45:2:2788
+46:2:2789
+47:2:2793
+48:2:2794
+49:2:2802
+50:2:2807
+51:2:2811
+52:2:2812
+53:2:2820
+54:2:2821
+55:2:2825
+56:2:2826
+57:2:2820
+58:2:2821
+59:2:2825
+60:2:2826
+61:2:2834
+62:2:2839
+63:2:2846
+64:2:2847
+65:2:2854
+66:2:2859
+67:2:2866
+68:2:2867
+69:2:2866
+70:2:2867
+71:2:2874
+72:2:2884
+73:0:4000
+74:2:2773
+75:0:4000
+76:2:2888
+77:2:2892
+78:2:2893
+79:2:2897
+80:2:2901
+81:2:2902
+82:2:2906
+83:2:2914
+84:2:2915
+85:2:2919
+86:2:2923
+87:2:2924
+88:2:2919
+89:2:2920
+90:2:2928
+91:0:4000
+92:2:2773
+93:0:4000
+94:2:2936
+95:2:2937
+96:2:2938
+97:0:4000
+98:2:2773
+99:0:4000
+100:2:2946
+101:0:4000
+102:2:2773
+103:0:4000
+104:2:2949
+105:2:2950
+106:2:2954
+107:2:2955
+108:2:2963
+109:2:2964
+110:2:2968
+111:2:2969
+112:2:2977
+113:2:2982
+114:2:2983
+115:2:2995
+116:2:2996
+117:2:3000
+118:2:3001
+119:2:2995
+120:2:2996
+121:2:3000
+122:2:3001
+123:2:3009
+124:2:3014
+125:2:3021
+126:2:3022
+127:2:3029
+128:2:3034
+129:2:3041
+130:2:3042
+131:2:3041
+132:2:3042
+133:2:3049
+134:2:3058
+135:0:4000
+136:2:2773
+137:0:4000
+138:2:3062
+139:2:3063
+140:2:3064
+141:2:3076
+142:2:3077
+143:2:3081
+144:2:3082
+145:2:3090
+146:2:3095
+147:2:3099
+148:2:3100
+149:2:3108
+150:2:3109
+151:2:3113
+152:2:3114
+153:2:3108
+154:2:3109
+155:2:3113
+156:2:3114
+157:2:3122
+158:2:3127
+159:2:3134
+160:2:3135
+161:2:3142
+162:2:3147
+163:2:3154
+164:2:3155
+165:2:3154
+166:2:3155
+167:2:3162
+168:2:3175
+169:2:3176
+170:0:4000
+171:2:2773
+172:0:4000
+173:2:3183
+174:2:3184
+175:2:3188
+176:2:3189
+177:2:3197
+178:2:3198
+179:2:3202
+180:2:3203
+181:2:3211
+182:2:3216
+183:2:3220
+184:2:3221
+185:2:3229
+186:2:3230
+187:2:3234
+188:2:3235
+189:2:3229
+190:2:3230
+191:2:3234
+192:2:3235
+193:2:3243
+194:2:3248
+195:2:3255
+196:2:3256
+197:2:3263
+198:2:3268
+199:2:3275
+200:2:3276
+201:2:3275
+202:2:3276
+203:2:3283
+204:0:4000
+205:2:2773
+206:0:4000
+207:2:3294
+208:2:3295
+209:2:3299
+210:2:3300
+211:2:3308
+212:2:3309
+213:2:3313
+214:2:3314
+215:2:3322
+216:2:3327
+217:2:3331
+218:2:3332
+219:2:3340
+220:2:3341
+221:2:3345
+222:2:3346
+223:2:3340
+224:2:3341
+225:2:3345
+226:2:3346
+227:2:3354
+228:2:3359
+229:2:3366
+230:2:3367
+231:2:3374
+232:2:3379
+233:2:3386
+234:2:3387
+235:2:3386
+236:2:3387
+237:2:3394
+238:2:3403
+239:0:4000
+240:2:2773
+241:0:4000
+242:2:3407
+243:2:3408
+244:2:3409
+245:2:3421
+246:2:3422
+247:2:3426
+248:2:3427
+249:2:3435
+250:2:3440
+251:2:3444
+252:2:3445
+253:2:3453
+254:2:3454
+255:2:3458
+256:2:3459
+257:2:3453
+258:2:3454
+259:2:3458
+260:2:3459
+261:2:3467
+262:2:3472
+263:2:3479
+264:2:3480
+265:2:3487
+266:2:3492
+267:2:3499
+268:2:3500
+269:2:3499
+270:2:3500
+271:2:3507
+272:2:3519
+273:2:3520
+274:0:4000
+275:2:2773
+276:0:4000
+277:2:3529
+278:2:3530
+279:0:4000
+280:2:2773
+281:0:4000
+282:2:3534
+283:0:4000
+284:2:3542
+285:0:4000
+286:2:2768
+287:0:4000
+288:2:2770
+289:0:4000
+290:2:2771
+291:0:4000
+292:2:2772
+293:0:4000
+294:2:2773
+295:0:4000
+296:2:2774
+297:2:2775
+298:2:2779
+299:2:2780
+300:2:2788
+301:2:2789
+302:2:2793
+303:2:2794
+304:2:2802
+305:2:2807
+306:2:2811
+307:2:2812
+308:2:2820
+309:2:2821
+310:2:2822
+311:2:2820
+312:2:2821
+313:2:2825
+314:2:2826
+315:2:2834
+316:2:2839
+317:2:2846
+318:2:2847
+319:2:2854
+320:2:2859
+321:2:2866
+322:2:2867
+323:2:2866
+324:2:2867
+325:2:2874
+326:2:2884
+327:0:4000
+328:2:2773
+329:0:4000
+330:2:2888
+331:2:2892
+332:2:2893
+333:2:2897
+334:2:2901
+335:2:2902
+336:2:2906
+337:2:2914
+338:2:2915
+339:2:2919
+340:2:2920
+341:2:2919
+342:2:2923
+343:2:2924
+344:2:2928
+345:0:4000
+346:2:2773
+347:0:4000
+348:2:2936
+349:2:2937
+350:2:2938
+351:0:4000
+352:2:2773
+353:0:4000
+354:2:2946
+355:0:4000
+356:2:2773
+357:0:4000
+358:2:2949
+359:2:2950
+360:2:2954
+361:2:2955
+362:2:2963
+363:2:2964
+364:2:2968
+365:2:2969
+366:2:2977
+367:2:2982
+368:2:2983
+369:2:2995
+370:2:2996
+371:2:3000
+372:2:3001
+373:2:2995
+374:2:2996
+375:2:3000
+376:2:3001
+377:2:3009
+378:2:3014
+379:2:3021
+380:2:3022
+381:2:3029
+382:2:3034
+383:2:3041
+384:2:3042
+385:2:3041
+386:2:3042
+387:2:3049
+388:2:3058
+389:0:4000
+390:2:2773
+391:0:4000
+392:2:3062
+393:2:3063
+394:2:3064
+395:2:3076
+396:2:3077
+397:2:3081
+398:2:3082
+399:2:3090
+400:2:3095
+401:2:3099
+402:2:3100
+403:2:3108
+404:2:3109
+405:2:3113
+406:2:3114
+407:2:3108
+408:2:3109
+409:2:3113
+410:2:3114
+411:2:3122
+412:2:3127
+413:2:3134
+414:2:3135
+415:2:3142
+416:2:3147
+417:2:3154
+418:2:3155
+419:2:3154
+420:2:3155
+421:2:3162
+422:2:3175
+423:2:3176
+424:0:4000
+425:2:2773
+426:0:4000
+427:2:3183
+428:2:3184
+429:2:3188
+430:2:3189
+431:2:3197
+432:2:3198
+433:2:3202
+434:2:3203
+435:2:3211
+436:2:3216
+437:2:3220
+438:2:3221
+439:2:3229
+440:2:3230
+441:2:3234
+442:2:3235
+443:2:3229
+444:2:3230
+445:2:3234
+446:2:3235
+447:2:3243
+448:2:3248
+449:2:3255
+450:2:3256
+451:2:3263
+452:2:3268
+453:2:3275
+454:2:3276
+455:2:3275
+456:2:3276
+457:2:3283
+458:0:4000
+459:2:2773
+460:0:4000
+461:2:3294
+462:2:3295
+463:2:3299
+464:2:3300
+465:2:3308
+466:2:3309
+467:2:3313
+468:2:3314
+469:2:3322
+470:2:3327
+471:2:3331
+472:2:3332
+473:2:3340
+474:2:3341
+475:2:3345
+476:2:3346
+477:2:3340
+478:2:3341
+479:2:3345
+480:2:3346
+481:2:3354
+482:2:3359
+483:2:3366
+484:2:3367
+485:2:3374
+486:2:3379
+487:2:3386
+488:2:3387
+489:2:3386
+490:2:3387
+491:2:3394
+492:2:3403
+493:0:4000
+494:2:2773
+495:0:4000
+496:2:3407
+497:2:3408
+498:2:3409
+499:2:3421
+500:2:3422
+501:2:3426
+502:2:3427
+503:2:3435
+504:2:3440
+505:2:3444
+506:2:3445
+507:2:3453
+508:2:3454
+509:2:3458
+510:2:3459
+511:2:3453
+512:2:3454
+513:2:3458
+514:2:3459
+515:2:3467
+516:2:3472
+517:2:3479
+518:2:3480
+519:2:3487
+520:2:3492
+521:2:3499
+522:2:3500
+523:2:3499
+524:2:3500
+525:2:3507
+526:2:3519
+527:2:3520
+528:0:4000
+529:2:2773
+530:0:4000
+531:2:3529
+532:2:3530
+533:0:4000
+534:2:2773
+535:0:4000
+536:2:3534
+537:0:4000
+538:2:3542
+539:0:4000
+540:2:2768
+541:0:4000
+542:2:2770
+543:0:4000
+544:2:2771
+545:0:4000
+546:2:2772
+547:0:4000
+548:2:2773
+549:0:4000
+550:2:2774
+551:2:2775
+552:2:2779
+553:2:2780
+554:2:2788
+555:2:2789
+556:2:2793
+557:2:2794
+558:2:2802
+559:2:2807
+560:2:2811
+561:2:2812
+562:2:2820
+563:2:2821
+564:2:2825
+565:2:2826
+566:2:2820
+567:2:2821
+568:2:2822
+569:2:2834
+570:2:2839
+571:2:2846
+572:2:2847
+573:2:2854
+574:2:2859
+575:2:2866
+576:2:2867
+577:2:2866
+578:2:2867
+579:2:2874
+580:2:2884
+581:0:4000
+582:2:2773
+583:0:4000
+584:2:2888
+585:2:2892
+586:2:2893
+587:2:2897
+588:2:2901
+589:2:2902
+590:2:2906
+591:2:2914
+592:2:2915
+593:2:2919
+594:2:2923
+595:2:2924
+596:2:2919
+597:2:2920
+598:2:2928
+599:0:4000
+600:2:2773
+601:0:4000
+602:2:2936
+603:2:2937
+604:2:2938
+605:0:4000
+606:2:2773
+607:0:4000
+608:2:2946
+609:0:4000
+610:2:2773
+611:0:4000
+612:2:2949
+613:2:2950
+614:2:2954
+615:2:2955
+616:2:2963
+617:2:2964
+618:2:2968
+619:2:2969
+620:2:2977
+621:2:2990
+622:2:2991
+623:2:2995
+624:2:2996
+625:2:3000
+626:2:3001
+627:2:2995
+628:2:2996
+629:2:3000
+630:2:3001
+631:2:3009
+632:2:3014
+633:2:3021
+634:2:3022
+635:2:3029
+636:2:3036
+637:2:3037
+638:2:3041
+639:2:3042
+640:2:3041
+641:2:3042
+642:2:3049
+643:2:3058
+644:0:4000
+645:2:2773
+646:0:4000
+647:2:3062
+648:2:3063
+649:2:3064
+650:2:3076
+651:2:3077
+652:2:3081
+653:2:3082
+654:2:3090
+655:2:3103
+656:2:3104
+657:2:3108
+658:2:3109
+659:2:3113
+660:2:3114
+661:2:3108
+662:2:3109
+663:2:3113
+664:2:3114
+665:2:3122
+666:2:3127
+667:2:3134
+668:2:3135
+669:2:3142
+670:2:3149
+671:2:3150
+672:2:3154
+673:2:3155
+674:2:3154
+675:2:3155
+676:2:3162
+677:2:3175
+678:2:3176
+679:0:4000
+680:2:2773
+681:0:4000
+682:2:3183
+683:2:3184
+684:2:3188
+685:2:3189
+686:2:3197
+687:2:3198
+688:2:3202
+689:2:3203
+690:2:3211
+691:2:3224
+692:2:3225
+693:2:3229
+694:2:3230
+695:2:3234
+696:2:3235
+697:2:3229
+698:2:3230
+699:2:3234
+700:2:3235
+701:2:3243
+702:2:3248
+703:2:3255
+704:2:3256
+705:2:3263
+706:2:3270
+707:2:3271
+708:2:3275
+709:2:3276
+710:2:3275
+711:2:3276
+712:2:3283
+713:0:4000
+714:2:2773
+715:0:4000
+716:2:3407
+717:2:3408
+718:2:3412
+719:2:3413
+720:2:3421
+721:2:3422
+722:2:3426
+723:2:3427
+724:2:3435
+725:2:3448
+726:2:3449
+727:2:3453
+728:2:3454
+729:2:3458
+730:2:3459
+731:2:3453
+732:2:3454
+733:2:3458
+734:2:3459
+735:2:3467
+736:2:3472
+737:2:3479
+738:2:3480
+739:2:3487
+740:2:3494
+741:2:3495
+742:2:3499
+743:2:3500
+744:2:3499
+745:2:3500
+746:2:3507
+747:2:3519
+748:2:3520
+749:0:4000
+750:2:2773
+751:0:4000
+752:2:3529
+753:2:3530
+754:0:4000
+755:2:2773
+756:0:4000
+757:2:3294
+758:2:3295
+759:2:3299
+760:2:3300
+761:2:3308
+762:2:3309
+763:2:3313
+764:2:3314
+765:2:3322
+766:2:3335
+767:2:3336
+768:2:3340
+769:2:3341
+770:2:3342
+771:2:3340
+772:2:3341
+773:2:3345
+774:2:3346
+775:2:3354
+776:2:3359
+777:2:3366
+778:2:3367
+779:2:3374
+780:2:3381
+781:2:3382
+782:2:3386
+783:2:3387
+784:2:3386
+785:2:3387
+786:2:3394
+787:2:3403
+788:0:4000
+789:2:2773
+790:0:4000
+791:2:3534
+792:0:4000
+793:2:3542
+794:0:4000
+795:2:3543
+796:0:4000
+797:2:3548
+798:0:4000
+799:1:2
+800:0:4000
+801:2:3549
+802:0:4000
+803:1:8
+804:0:4000
+805:2:3548
+806:0:4000
+807:1:9
+808:0:4000
+809:2:3549
+810:0:4000
+811:1:10
+812:0:4000
+813:2:3548
+814:0:4000
+815:1:11
+816:0:4000
+817:2:3549
+818:0:4000
+819:1:12
+820:0:4000
+821:2:3548
+822:0:4000
+823:1:13
+824:0:4000
+825:2:3549
+826:0:4000
+827:1:14
+828:0:4000
+829:2:3548
+830:0:4000
+831:1:15
+832:0:4000
+833:2:3549
+834:0:4000
+835:1:16
+836:1:17
+837:1:21
+838:1:22
+839:1:30
+840:1:31
+841:1:35
+842:1:36
+843:1:44
+844:1:49
+845:1:53
+846:1:54
+847:1:62
+848:1:63
+849:1:67
+850:1:68
+851:1:62
+852:1:63
+853:1:67
+854:1:68
+855:1:76
+856:1:81
+857:1:88
+858:1:89
+859:1:96
+860:1:101
+861:1:108
+862:1:109
+863:1:108
+864:1:109
+865:1:116
+866:0:4000
+867:2:3548
+868:0:4000
+869:1:15
+870:0:4000
+871:2:3549
+872:0:4000
+873:1:127
+874:1:128
+875:0:4000
+876:2:3548
+877:0:4000
+878:1:15
+879:0:4000
+880:2:3549
+881:0:4000
+882:1:134
+883:1:135
+884:1:139
+885:1:140
+886:1:148
+887:1:149
+888:1:153
+889:1:154
+890:1:162
+891:1:167
+892:1:171
+893:1:172
+894:1:180
+895:1:181
+896:1:185
+897:1:186
+898:1:180
+899:1:181
+900:1:185
+901:1:186
+902:1:194
+903:1:199
+904:1:206
+905:1:207
+906:1:214
+907:1:219
+908:1:226
+909:1:227
+910:1:226
+911:1:227
+912:1:234
+913:0:4000
+914:2:3548
+915:0:4000
+916:1:15
+917:0:4000
+918:2:3549
+919:0:4000
+920:1:245
+921:1:246
+922:1:250
+923:1:251
+924:1:259
+925:1:260
+926:1:264
+927:1:265
+928:1:273
+929:1:278
+930:1:282
+931:1:283
+932:1:291
+933:1:292
+934:1:296
+935:1:297
+936:1:291
+937:1:292
+938:1:296
+939:1:297
+940:1:305
+941:1:310
+942:1:317
+943:1:318
+944:1:325
+945:1:330
+946:1:337
+947:1:338
+948:1:337
+949:1:338
+950:1:345
+951:1:354
+952:0:4000
+953:2:3548
+954:0:4000
+955:1:15
+956:0:4000
+957:2:3549
+958:0:4000
+959:1:564
+960:1:565
+961:1:569
+962:1:570
+963:1:578
+964:1:579
+965:1:580
+966:1:592
+967:1:597
+968:1:601
+969:1:602
+970:1:610
+971:1:611
+972:1:615
+973:1:616
+974:1:610
+975:1:611
+976:1:615
+977:1:616
+978:1:624
+979:1:629
+980:1:636
+981:1:637
+982:1:644
+983:1:649
+984:1:656
+985:1:657
+986:1:656
+987:1:657
+988:1:664
+989:0:4000
+990:2:3548
+991:0:4000
+992:1:15
+993:0:4000
+994:2:3549
+995:0:4000
+996:1:675
+997:1:678
+998:1:679
+999:0:4000
+1000:2:3548
+1001:0:4000
+1002:1:15
+1003:0:4000
+1004:2:3549
+1005:0:4000
+1006:1:682
+1007:1:683
+1008:1:687
+1009:1:688
+1010:1:696
+1011:1:697
+1012:1:701
+1013:1:702
+1014:1:710
+1015:1:715
+1016:1:719
+1017:1:720
+1018:1:728
+1019:1:729
+1020:1:733
+1021:1:734
+1022:1:728
+1023:1:729
+1024:1:733
+1025:1:734
+1026:1:742
+1027:1:747
+1028:1:754
+1029:1:755
+1030:1:762
+1031:1:767
+1032:1:774
+1033:1:775
+1034:1:774
+1035:1:775
+1036:1:782
+1037:0:4000
+1038:2:3548
+1039:0:4000
+1040:1:15
+1041:0:4000
+1042:2:3549
+1043:0:4000
+1044:1:906
+1045:1:907
+1046:1:911
+1047:1:912
+1048:1:920
+1049:1:921
+1050:1:925
+1051:1:926
+1052:1:934
+1053:1:939
+1054:1:943
+1055:1:944
+1056:1:952
+1057:1:953
+1058:1:957
+1059:1:958
+1060:1:952
+1061:1:953
+1062:1:957
+1063:1:958
+1064:1:966
+1065:1:971
+1066:1:978
+1067:1:979
+1068:1:986
+1069:1:991
+1070:1:998
+1071:1:999
+1072:1:998
+1073:1:999
+1074:1:1006
+1075:1:1015
+1076:1:1019
+1077:0:4000
+1078:2:3548
+1079:0:4000
+1080:1:15
+1081:0:4000
+1082:2:3549
+1083:0:4000
+1084:1:1020
+1085:1:1021
+1086:1:1025
+1087:1:1026
+1088:1:1034
+1089:1:1035
+1090:1:1036
+1091:1:1048
+1092:1:1053
+1093:1:1057
+1094:1:1058
+1095:1:1066
+1096:1:1067
+1097:1:1071
+1098:1:1072
+1099:1:1066
+1100:1:1067
+1101:1:1071
+1102:1:1072
+1103:1:1080
+1104:1:1085
+1105:1:1092
+1106:1:1093
+1107:1:1100
+1108:1:1105
+1109:1:1112
+1110:1:1113
+1111:1:1112
+1112:1:1113
+1113:1:1120
+1114:0:4000
+1115:2:3548
+1116:0:4000
+1117:1:15
+1118:0:4000
+1119:2:3549
+1120:0:4000
+1121:1:1131
+1122:0:4000
+1123:2:3548
+1124:0:4000
+1125:1:2667
+1126:1:2674
+1127:1:2675
+1128:1:2682
+1129:1:2687
+1130:1:2694
+1131:1:2695
+1132:1:2694
+1133:1:2695
+1134:1:2702
+1135:1:2706
+1136:0:4000
+1137:2:3549
+1138:0:4000
+1139:1:1133
+1140:1:1134
+1141:0:3998
+1142:2:3548
+1143:0:4004
+1144:0:4000
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.log
new file mode 100644 (file)
index 0000000..00d7df3
--- /dev/null
@@ -0,0 +1,448 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_rmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    4907 States=    1e+06 Transitions= 2.54e+07 Memory=   550.432        t=   71.2 R=   1e+04
+Depth=    5133 States=    2e+06 Transitions= 5.13e+07 Memory=   634.318        t=    145 R=   1e+04
+Depth=    5133 States=    3e+06 Transitions= 7.76e+07 Memory=   718.303        t=    220 R=   1e+04
+pan: resizing hashtable to -w22..  done
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5133, errors: 0
+  3846927 states, stored
+ 96650452 states, matched
+1.0049738e+08 transitions (= stored+matched)
+1.6169296e+09 atomic steps
+hash conflicts:  68306904 (resolved)
+
+Stats on memory usage (in Megabytes):
+  425.571      equivalent memory usage for states (stored*(State-vector + overhead))
+  330.939      actual memory usage for states (compression: 77.76%)
+               state-vector as stored = 62 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  820.420      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 81, "(1)"
+       line 257, "pan.___", state 101, "(1)"
+       line 261, "pan.___", state 109, "(1)"
+       line 597, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 199, "(1)"
+       line 257, "pan.___", state 219, "(1)"
+       line 261, "pan.___", state 227, "(1)"
+       line 411, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 310, "(1)"
+       line 257, "pan.___", state 330, "(1)"
+       line 261, "pan.___", state 338, "(1)"
+       line 411, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 361, "(1)"
+       line 411, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 362, "else"
+       line 411, "pan.___", state 365, "(1)"
+       line 415, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 375, "(1)"
+       line 415, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 376, "else"
+       line 415, "pan.___", state 379, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 413, "pan.___", state 385, "((i<1))"
+       line 413, "pan.___", state 385, "((i>=1))"
+       line 420, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 393, "(1)"
+       line 420, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 394, "else"
+       line 420, "pan.___", state 397, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 424, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 407, "(1)"
+       line 424, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 408, "else"
+       line 424, "pan.___", state 411, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 422, "pan.___", state 417, "((i<2))"
+       line 422, "pan.___", state 417, "((i>=2))"
+       line 249, "pan.___", state 423, "(1)"
+       line 253, "pan.___", state 431, "(1)"
+       line 253, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 432, "else"
+       line 251, "pan.___", state 437, "((i<1))"
+       line 251, "pan.___", state 437, "((i>=1))"
+       line 257, "pan.___", state 443, "(1)"
+       line 257, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 444, "else"
+       line 261, "pan.___", state 451, "(1)"
+       line 261, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 452, "else"
+       line 259, "pan.___", state 457, "((i<2))"
+       line 259, "pan.___", state 457, "((i>=2))"
+       line 266, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 461, "else"
+       line 431, "pan.___", state 463, "(1)"
+       line 431, "pan.___", state 463, "(1)"
+       line 597, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 468, "(1)"
+       line 272, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 483, "(1)"
+       line 280, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 519, "(1)"
+       line 253, "pan.___", state 527, "(1)"
+       line 257, "pan.___", state 539, "(1)"
+       line 261, "pan.___", state 547, "(1)"
+       line 411, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 629, "(1)"
+       line 253, "pan.___", state 637, "(1)"
+       line 257, "pan.___", state 649, "(1)"
+       line 261, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 747, "(1)"
+       line 257, "pan.___", state 767, "(1)"
+       line 261, "pan.___", state 775, "(1)"
+       line 411, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 796, "(1)"
+       line 411, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 797, "else"
+       line 411, "pan.___", state 800, "(1)"
+       line 415, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 810, "(1)"
+       line 415, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 811, "else"
+       line 415, "pan.___", state 814, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 413, "pan.___", state 820, "((i<1))"
+       line 413, "pan.___", state 820, "((i>=1))"
+       line 420, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 828, "(1)"
+       line 420, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 829, "else"
+       line 420, "pan.___", state 832, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 424, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 842, "(1)"
+       line 424, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 843, "else"
+       line 424, "pan.___", state 846, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 422, "pan.___", state 852, "((i<2))"
+       line 422, "pan.___", state 852, "((i>=2))"
+       line 249, "pan.___", state 858, "(1)"
+       line 253, "pan.___", state 866, "(1)"
+       line 253, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 867, "else"
+       line 251, "pan.___", state 872, "((i<1))"
+       line 251, "pan.___", state 872, "((i>=1))"
+       line 257, "pan.___", state 878, "(1)"
+       line 257, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 879, "else"
+       line 261, "pan.___", state 886, "(1)"
+       line 261, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 887, "else"
+       line 259, "pan.___", state 892, "((i<2))"
+       line 259, "pan.___", state 892, "((i>=2))"
+       line 266, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 896, "else"
+       line 431, "pan.___", state 898, "(1)"
+       line 431, "pan.___", state 898, "(1)"
+       line 605, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 971, "(1)"
+       line 253, "pan.___", state 979, "(1)"
+       line 257, "pan.___", state 991, "(1)"
+       line 261, "pan.___", state 999, "(1)"
+       line 411, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1085, "(1)"
+       line 257, "pan.___", state 1105, "(1)"
+       line 261, "pan.___", state 1113, "(1)"
+       line 411, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1200, "(1)"
+       line 257, "pan.___", state 1220, "(1)"
+       line 261, "pan.___", state 1228, "(1)"
+       line 411, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1311, "(1)"
+       line 257, "pan.___", state 1331, "(1)"
+       line 261, "pan.___", state 1339, "(1)"
+       line 272, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1407, "(1)"
+       line 253, "pan.___", state 1415, "(1)"
+       line 257, "pan.___", state 1427, "(1)"
+       line 261, "pan.___", state 1435, "(1)"
+       line 411, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1517, "(1)"
+       line 253, "pan.___", state 1525, "(1)"
+       line 257, "pan.___", state 1537, "(1)"
+       line 261, "pan.___", state 1545, "(1)"
+       line 411, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1628, "(1)"
+       line 253, "pan.___", state 1636, "(1)"
+       line 257, "pan.___", state 1648, "(1)"
+       line 261, "pan.___", state 1656, "(1)"
+       line 411, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1742, "(1)"
+       line 257, "pan.___", state 1762, "(1)"
+       line 261, "pan.___", state 1770, "(1)"
+       line 644, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1860, "(1)"
+       line 257, "pan.___", state 1880, "(1)"
+       line 261, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1971, "(1)"
+       line 257, "pan.___", state 1991, "(1)"
+       line 261, "pan.___", state 1999, "(1)"
+       line 411, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2022, "(1)"
+       line 411, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2023, "else"
+       line 411, "pan.___", state 2026, "(1)"
+       line 415, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2036, "(1)"
+       line 415, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2037, "else"
+       line 415, "pan.___", state 2040, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 413, "pan.___", state 2046, "((i<1))"
+       line 413, "pan.___", state 2046, "((i>=1))"
+       line 420, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2054, "(1)"
+       line 420, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2055, "else"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 424, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2068, "(1)"
+       line 424, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2069, "else"
+       line 424, "pan.___", state 2072, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 422, "pan.___", state 2078, "((i<2))"
+       line 422, "pan.___", state 2078, "((i>=2))"
+       line 249, "pan.___", state 2084, "(1)"
+       line 253, "pan.___", state 2092, "(1)"
+       line 253, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2093, "else"
+       line 251, "pan.___", state 2098, "((i<1))"
+       line 251, "pan.___", state 2098, "((i>=1))"
+       line 257, "pan.___", state 2104, "(1)"
+       line 257, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2105, "else"
+       line 261, "pan.___", state 2112, "(1)"
+       line 261, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2113, "else"
+       line 259, "pan.___", state 2118, "((i<2))"
+       line 259, "pan.___", state 2118, "((i>=2))"
+       line 266, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2122, "else"
+       line 431, "pan.___", state 2124, "(1)"
+       line 431, "pan.___", state 2124, "(1)"
+       line 644, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2129, "(1)"
+       line 272, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2180, "(1)"
+       line 253, "pan.___", state 2188, "(1)"
+       line 257, "pan.___", state 2200, "(1)"
+       line 261, "pan.___", state 2208, "(1)"
+       line 411, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2290, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2310, "(1)"
+       line 261, "pan.___", state 2318, "(1)"
+       line 272, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2387, "(1)"
+       line 253, "pan.___", state 2395, "(1)"
+       line 257, "pan.___", state 2407, "(1)"
+       line 261, "pan.___", state 2415, "(1)"
+       line 411, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2497, "(1)"
+       line 253, "pan.___", state 2505, "(1)"
+       line 257, "pan.___", state 2517, "(1)"
+       line 261, "pan.___", state 2525, "(1)"
+       line 411, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2608, "(1)"
+       line 253, "pan.___", state 2616, "(1)"
+       line 257, "pan.___", state 2628, "(1)"
+       line 261, "pan.___", state 2636, "(1)"
+       line 411, "pan.___", state 2667, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2699, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2713, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2731, "(1)"
+       line 257, "pan.___", state 2751, "(1)"
+       line 261, "pan.___", state 2759, "(1)"
+       line 411, "pan.___", state 2776, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2790, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2808, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2822, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2840, "(1)"
+       line 253, "pan.___", state 2848, "(1)"
+       line 257, "pan.___", state 2860, "(1)"
+       line 261, "pan.___", state 2868, "(1)"
+       line 898, "pan.___", state 2887, "-end-"
+       (266 of 2887 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 82, "(1)"
+       line 253, "pan.___", state 90, "(1)"
+       line 257, "pan.___", state 102, "(1)"
+       line 272, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 257, "(1)"
+       line 253, "pan.___", state 265, "(1)"
+       line 257, "pan.___", state 277, "(1)"
+       line 261, "pan.___", state 285, "(1)"
+       line 415, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 378, "(1)"
+       line 257, "pan.___", state 390, "(1)"
+       line 261, "pan.___", state 398, "(1)"
+       line 415, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 499, "(1)"
+       line 257, "pan.___", state 511, "(1)"
+       line 261, "pan.___", state 519, "(1)"
+       line 415, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 610, "(1)"
+       line 257, "pan.___", state 622, "(1)"
+       line 261, "pan.___", state 630, "(1)"
+       line 415, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 723, "(1)"
+       line 257, "pan.___", state 735, "(1)"
+       line 261, "pan.___", state 743, "(1)"
+       line 272, "pan.___", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 820, "(1)"
+       line 284, "pan.___", state 827, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 843, "(1)"
+       line 253, "pan.___", state 851, "(1)"
+       line 257, "pan.___", state 863, "(1)"
+       line 261, "pan.___", state 871, "(1)"
+       line 276, "pan.___", state 896, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 909, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 918, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 934, "(1)"
+       line 253, "pan.___", state 942, "(1)"
+       line 257, "pan.___", state 954, "(1)"
+       line 261, "pan.___", state 962, "(1)"
+       line 276, "pan.___", state 987, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1000, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1009, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1025, "(1)"
+       line 253, "pan.___", state 1033, "(1)"
+       line 257, "pan.___", state 1045, "(1)"
+       line 261, "pan.___", state 1053, "(1)"
+       line 276, "pan.___", state 1078, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1091, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1100, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1116, "(1)"
+       line 253, "pan.___", state 1124, "(1)"
+       line 257, "pan.___", state 1136, "(1)"
+       line 261, "pan.___", state 1144, "(1)"
+       line 1237, "pan.___", state 1159, "-end-"
+       (71 of 1159 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 285 seconds
+pan: rate 13484.269 states/second
+pan: avg transition delay 2.8388e-06 usec
+cp .input.spin urcu_free_no_rmb.spin.input
+cp .input.spin.trail urcu_free_no_rmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_rmb.spin.input
new file mode 100644 (file)
index 0000000..d0fd74e
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_RMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
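+/*
+ * Minimal usage sketch of the token scheme (hypothetical tokens STEP_A/STEP_B
+ * and variables x, y, flow; not part of this model). Each option below would
+ * live inside a process' do/od loop; the second option can only fire once the
+ * first has produced its token, which expresses a read-after-write dependency
+ * between the two instructions:
+ *
+ *     #define STEP_A (1 << 0)
+ *     #define STEP_B (1 << 1)
+ *
+ *     :: CONSUME_TOKENS(flow, 0, STEP_A) ->
+ *             tmp = READ_CACHED_VAR(x);
+ *             PRODUCE_TOKENS(flow, STEP_A);
+ *     :: CONSUME_TOKENS(flow, STEP_A, STEP_B) ->
+ *             WRITE_CACHED_VAR(y, tmp);
+ *             PRODUCE_TOKENS(flow, STEP_B);
+ */
+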
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it cannot when multiple writes must
+ * target the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
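+
+/*
+ * Tiny illustration of the dependency types above (hypothetical statements,
+ * not taken from this model):
+ *
+ *     a = x; y = a;   -- RAW: the second statement reads what the first wrote
+ *     y = a; a = 1;   -- WAR: the write must not be moved before the read
+ *     a = 1; a = 2;   -- WAW: two writes to the same variable must stay ordered
+ */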
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
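+
+/*
+ * In this model, a second pass of the read side is written out explicitly (see
+ * the *_UNROLL token definitions further down, e.g. READ_LOCK_UNROLL_BASE)
+ * rather than being expressed as a loop.
+ */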
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May or may not propagate the local cache to/from memory (nondeterministic).
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
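+/*
+ * Usage sketch of the cached-variable macros (hypothetical variable "foo",
+ * not part of this model):
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *
+ *     INIT_CACHED_VAR(foo, 0, j);          -- j is a scratch loop index
+ *     WRITE_CACHED_VAR(foo, 1);            -- update local cache, mark it dirty
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());  -- flush the dirty line to memory
+ *     tmp = READ_CACHED_VAR(foo);          -- always reads the local cached copy
+ */
+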
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
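+
+/*
+ * Worked example of the encoding above: the outermost PROCEDURE_READ_LOCK is
+ * instantiated with base READ_LOCK_BASE = 1, so it occupies bits 1 to 5 and
+ * signals completion with READ_LOCK_OUT (bit 5).  The next free bit (6) is
+ * READ_PROC_FIRST_MB, the nested lock starts at bit 7, and so on up to
+ * READ_UNLOCK_OUT_UNROLL (bit 29); hence the clear mask covers bits 0 to 29
+ * with ((1 << 30) - 1).
+ */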
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
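+
+	/*
+	 * Editor's note: producing a barrier token up front makes the
+	 * corresponding explicit smp_mb_reader() step below non-executable
+	 * (its "not yet produced" guard fails), while the steps depending on
+	 * it still see the token as satisfied.  NO_MB uses this to model the
+	 * reader with its barriers removed; REMOTE_BARRIERS instead relies on
+	 * the signal-delivered smp_mb_recv() path modelled just below.
+	 */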
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+		/*
+		 * The signal-based memory barrier will only execute when the
+		 * execution appears to be in program order: each alternative
+		 * below matches one program-order prefix of the reader's steps
+		 * (tokens already produced vs. tokens not yet produced).
+		 */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+			/* Note : we remove the nested memory barrier from the
+			 * read unlock model, given it is not usually needed. The
+			 * implementation keeps the barrier only because the
+			 * performance impact of adding a branch to skip it in the
+			 * common case does not justify removing it.
+			 */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+	/*
+	 * Dependency between consecutive loops :
+	 * RAW dependency of
+	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+	 * on
+	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+	 * between loops.
+	 * _WHEN THE MB()s are in place_, they add full ordering of the
+	 * generation pointer read wrt the active reader count read, which
+	 * ensures execution will not spill across loop iterations.
+	 * However, if the mb()s are removed (execution relying on the signal
+	 * handler to promote barrier() -> smp_mb()), nothing prevents one
+	 * loop iteration from spilling its execution into the other's.
+	 */
+       goto end;
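+/*
+ * Editor's note: the rmb1/rmb2 out-of-line blocks model the
+ * smp_read_barrier_depends() used by the dependent data reads above; when
+ * NO_RMB is defined, the read barrier is replaced by a plain out-of-order
+ * memory access (ooo_mem), modelling a missing read barrier.
+ */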
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+		/*
+		 * We do not test the reader's progress here, because we are
+		 * mainly interested in the writer's progress. The reader never
+		 * blocks anyway. We have to test reader and writer progress
+		 * separately, otherwise we could think the writer is making
+		 * progress when it is in fact blocked by an always-progressing
+		 * reader.
+		 */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding for urcu_writer.
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
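+
+/*
+ * Editor's sketch of the writer steps encoded above, following the token
+ * names and the instructions below: write the new data entry, smp_wmb(),
+ * exchange the RCU pointer, first smp_mb(), first grace-period flip
+ * (read/write urcu_gp_ctr, then wait for reader 0), second flip and wait,
+ * second smp_mb(), then poison (free) the old data entry.
+ */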
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+	byte cur_gp_val = 0;	/*
+				 * Keep a local copy of the current parity so
+				 * we don't add non-existing dependencies on the
+				 * global GP update. Needed to test the
+				 * single-flip case.
+				 */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+			/* In normal execution, we always start by waiting for
+			 * the even parity.
+			 */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
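+
+			/*
+			 * Editor's note: the WAIT_LOOP token is produced while
+			 * reader 0 is inside a critical section (non-zero
+			 * nesting count) whose parity differs from cur_gp_val,
+			 * i.e. a reader the grace period must still wait for;
+			 * otherwise the wait completes.
+			 */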
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+			/* The memory barrier will invalidate the second read,
+			 * performed as a prefetch. Note that all instructions
+			 * with side-effects depending on
+			 * WRITE_PROC_SECOND_READ_GP should also depend on
+			 * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+		/*
+		 * Note : the Promela model adds implicit serialization of the
+		 * WRITE_FREE instruction. Normally, it would be permitted to
+		 * spill into the next loop execution. Given that the validation
+		 * checks whether the data entry read is poisoned, it is OK if
+		 * we do not check for "late arriving" memory poisoning.
+		 */
+       :: else -> break;
+       od;
+	/*
+	 * Given that the reader loops infinitely, let the writer also
+	 * busy-loop with a progress label here so that, under weak fairness,
+	 * we can test the writer's progress.
+	 */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.log
new file mode 100644 (file)
index 0000000..6c01490
--- /dev/null
@@ -0,0 +1,467 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_wmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    4736 States=    1e+06 Transitions= 2.14e+07 Memory=   550.334        t=   59.2 R=   2e+04
+Depth=    4922 States=    2e+06 Transitions= 4.72e+07 Memory=   634.221        t=    132 R=   2e+04
+Depth=    4922 States=    3e+06 Transitions= 7.19e+07 Memory=   718.205        t=    203 R=   1e+04
+pan: resizing hashtable to -w22..  done
+pan: claim violated! (at depth 1295)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 4922, errors: 1
+  3435201 states, stored
+ 77730101 states, matched
+ 81165302 transitions (= stored+matched)
+1.2953753e+09 atomic steps
+hash conflicts:  60833788 (resolved)
+
+Stats on memory usage (in Megabytes):
+  380.023      equivalent memory usage for states (stored*(State-vector + overhead))
+  296.169      actual memory usage for states (compression: 77.93%)
+               state-vector as stored = 62 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  785.752      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 81, "(1)"
+       line 257, "pan.___", state 101, "(1)"
+       line 261, "pan.___", state 109, "(1)"
+       line 597, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 199, "(1)"
+       line 257, "pan.___", state 219, "(1)"
+       line 261, "pan.___", state 227, "(1)"
+       line 411, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 310, "(1)"
+       line 257, "pan.___", state 330, "(1)"
+       line 261, "pan.___", state 338, "(1)"
+       line 411, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 361, "(1)"
+       line 411, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 362, "else"
+       line 411, "pan.___", state 365, "(1)"
+       line 415, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 375, "(1)"
+       line 415, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 376, "else"
+       line 415, "pan.___", state 379, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 413, "pan.___", state 385, "((i<1))"
+       line 413, "pan.___", state 385, "((i>=1))"
+       line 420, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 393, "(1)"
+       line 420, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 394, "else"
+       line 420, "pan.___", state 397, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 424, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 407, "(1)"
+       line 424, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 408, "else"
+       line 424, "pan.___", state 411, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 422, "pan.___", state 417, "((i<2))"
+       line 422, "pan.___", state 417, "((i>=2))"
+       line 249, "pan.___", state 423, "(1)"
+       line 253, "pan.___", state 431, "(1)"
+       line 253, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 432, "else"
+       line 251, "pan.___", state 437, "((i<1))"
+       line 251, "pan.___", state 437, "((i>=1))"
+       line 257, "pan.___", state 443, "(1)"
+       line 257, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 444, "else"
+       line 261, "pan.___", state 451, "(1)"
+       line 261, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 452, "else"
+       line 259, "pan.___", state 457, "((i<2))"
+       line 259, "pan.___", state 457, "((i>=2))"
+       line 266, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 461, "else"
+       line 431, "pan.___", state 463, "(1)"
+       line 431, "pan.___", state 463, "(1)"
+       line 597, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 468, "(1)"
+       line 272, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 483, "(1)"
+       line 280, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 519, "(1)"
+       line 253, "pan.___", state 527, "(1)"
+       line 257, "pan.___", state 539, "(1)"
+       line 261, "pan.___", state 547, "(1)"
+       line 411, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 629, "(1)"
+       line 253, "pan.___", state 637, "(1)"
+       line 257, "pan.___", state 649, "(1)"
+       line 261, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 747, "(1)"
+       line 257, "pan.___", state 767, "(1)"
+       line 261, "pan.___", state 775, "(1)"
+       line 411, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 796, "(1)"
+       line 411, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 797, "else"
+       line 411, "pan.___", state 800, "(1)"
+       line 415, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 810, "(1)"
+       line 415, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 811, "else"
+       line 415, "pan.___", state 814, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 413, "pan.___", state 820, "((i<1))"
+       line 413, "pan.___", state 820, "((i>=1))"
+       line 420, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 828, "(1)"
+       line 420, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 829, "else"
+       line 420, "pan.___", state 832, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 424, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 842, "(1)"
+       line 424, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 843, "else"
+       line 424, "pan.___", state 846, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 422, "pan.___", state 852, "((i<2))"
+       line 422, "pan.___", state 852, "((i>=2))"
+       line 249, "pan.___", state 858, "(1)"
+       line 253, "pan.___", state 866, "(1)"
+       line 253, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 867, "else"
+       line 251, "pan.___", state 872, "((i<1))"
+       line 251, "pan.___", state 872, "((i>=1))"
+       line 257, "pan.___", state 878, "(1)"
+       line 257, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 879, "else"
+       line 261, "pan.___", state 886, "(1)"
+       line 261, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 887, "else"
+       line 259, "pan.___", state 892, "((i<2))"
+       line 259, "pan.___", state 892, "((i>=2))"
+       line 266, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 896, "else"
+       line 431, "pan.___", state 898, "(1)"
+       line 431, "pan.___", state 898, "(1)"
+       line 605, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 971, "(1)"
+       line 253, "pan.___", state 979, "(1)"
+       line 257, "pan.___", state 991, "(1)"
+       line 261, "pan.___", state 999, "(1)"
+       line 411, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1085, "(1)"
+       line 257, "pan.___", state 1105, "(1)"
+       line 261, "pan.___", state 1113, "(1)"
+       line 411, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1200, "(1)"
+       line 257, "pan.___", state 1220, "(1)"
+       line 261, "pan.___", state 1228, "(1)"
+       line 411, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1311, "(1)"
+       line 257, "pan.___", state 1331, "(1)"
+       line 261, "pan.___", state 1339, "(1)"
+       line 272, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1407, "(1)"
+       line 253, "pan.___", state 1415, "(1)"
+       line 257, "pan.___", state 1427, "(1)"
+       line 261, "pan.___", state 1435, "(1)"
+       line 411, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1517, "(1)"
+       line 253, "pan.___", state 1525, "(1)"
+       line 257, "pan.___", state 1537, "(1)"
+       line 261, "pan.___", state 1545, "(1)"
+       line 411, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1628, "(1)"
+       line 253, "pan.___", state 1636, "(1)"
+       line 257, "pan.___", state 1648, "(1)"
+       line 261, "pan.___", state 1656, "(1)"
+       line 411, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1742, "(1)"
+       line 257, "pan.___", state 1762, "(1)"
+       line 261, "pan.___", state 1770, "(1)"
+       line 644, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1860, "(1)"
+       line 257, "pan.___", state 1880, "(1)"
+       line 261, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1971, "(1)"
+       line 257, "pan.___", state 1991, "(1)"
+       line 261, "pan.___", state 1999, "(1)"
+       line 411, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2022, "(1)"
+       line 411, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2023, "else"
+       line 411, "pan.___", state 2026, "(1)"
+       line 415, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2036, "(1)"
+       line 415, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2037, "else"
+       line 415, "pan.___", state 2040, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 413, "pan.___", state 2046, "((i<1))"
+       line 413, "pan.___", state 2046, "((i>=1))"
+       line 420, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2054, "(1)"
+       line 420, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2055, "else"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 424, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2068, "(1)"
+       line 424, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2069, "else"
+       line 424, "pan.___", state 2072, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 422, "pan.___", state 2078, "((i<2))"
+       line 422, "pan.___", state 2078, "((i>=2))"
+       line 249, "pan.___", state 2084, "(1)"
+       line 253, "pan.___", state 2092, "(1)"
+       line 253, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2093, "else"
+       line 251, "pan.___", state 2098, "((i<1))"
+       line 251, "pan.___", state 2098, "((i>=1))"
+       line 257, "pan.___", state 2104, "(1)"
+       line 257, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2105, "else"
+       line 261, "pan.___", state 2112, "(1)"
+       line 261, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2113, "else"
+       line 259, "pan.___", state 2118, "((i<2))"
+       line 259, "pan.___", state 2118, "((i>=2))"
+       line 266, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2122, "else"
+       line 431, "pan.___", state 2124, "(1)"
+       line 431, "pan.___", state 2124, "(1)"
+       line 644, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2129, "(1)"
+       line 272, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2180, "(1)"
+       line 253, "pan.___", state 2188, "(1)"
+       line 257, "pan.___", state 2200, "(1)"
+       line 261, "pan.___", state 2208, "(1)"
+       line 411, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2290, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2310, "(1)"
+       line 261, "pan.___", state 2318, "(1)"
+       line 272, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2387, "(1)"
+       line 253, "pan.___", state 2395, "(1)"
+       line 257, "pan.___", state 2407, "(1)"
+       line 261, "pan.___", state 2415, "(1)"
+       line 411, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2497, "(1)"
+       line 253, "pan.___", state 2505, "(1)"
+       line 257, "pan.___", state 2517, "(1)"
+       line 261, "pan.___", state 2525, "(1)"
+       line 411, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2608, "(1)"
+       line 253, "pan.___", state 2616, "(1)"
+       line 257, "pan.___", state 2628, "(1)"
+       line 261, "pan.___", state 2636, "(1)"
+       line 249, "pan.___", state 2667, "(1)"
+       line 257, "pan.___", state 2687, "(1)"
+       line 261, "pan.___", state 2695, "(1)"
+       line 249, "pan.___", state 2710, "(1)"
+       line 253, "pan.___", state 2718, "(1)"
+       line 257, "pan.___", state 2730, "(1)"
+       line 261, "pan.___", state 2738, "(1)"
+       line 898, "pan.___", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 19, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 33, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 83, "(1)"
+       line 253, "pan.___", state 91, "(1)"
+       line 272, "pan.___", state 132, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 134, "(1)"
+       line 276, "pan.___", state 141, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 143, "(1)"
+       line 276, "pan.___", state 144, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 144, "else"
+       line 274, "pan.___", state 149, "((i<1))"
+       line 274, "pan.___", state 149, "((i>=1))"
+       line 280, "pan.___", state 154, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 156, "(1)"
+       line 280, "pan.___", state 157, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 157, "else"
+       line 284, "pan.___", state 163, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 165, "(1)"
+       line 284, "pan.___", state 166, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 166, "else"
+       line 289, "pan.___", state 175, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 289, "pan.___", state 175, "else"
+       line 411, "pan.___", state 194, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 208, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 226, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 240, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 258, "(1)"
+       line 253, "pan.___", state 266, "(1)"
+       line 257, "pan.___", state 278, "(1)"
+       line 261, "pan.___", state 286, "(1)"
+       line 415, "pan.___", state 321, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 339, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 353, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 379, "(1)"
+       line 257, "pan.___", state 391, "(1)"
+       line 261, "pan.___", state 399, "(1)"
+       line 415, "pan.___", state 442, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 460, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 474, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 500, "(1)"
+       line 257, "pan.___", state 512, "(1)"
+       line 261, "pan.___", state 520, "(1)"
+       line 415, "pan.___", state 553, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 571, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 585, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 611, "(1)"
+       line 257, "pan.___", state 623, "(1)"
+       line 261, "pan.___", state 631, "(1)"
+       line 415, "pan.___", state 666, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 684, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 698, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 724, "(1)"
+       line 257, "pan.___", state 736, "(1)"
+       line 261, "pan.___", state 744, "(1)"
+       line 272, "pan.___", state 797, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 806, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 844, "(1)"
+       line 253, "pan.___", state 852, "(1)"
+       line 257, "pan.___", state 864, "(1)"
+       line 261, "pan.___", state 872, "(1)"
+       line 276, "pan.___", state 897, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 910, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 919, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 935, "(1)"
+       line 253, "pan.___", state 943, "(1)"
+       line 257, "pan.___", state 955, "(1)"
+       line 261, "pan.___", state 963, "(1)"
+       line 276, "pan.___", state 988, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1001, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1010, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1026, "(1)"
+       line 253, "pan.___", state 1034, "(1)"
+       line 257, "pan.___", state 1046, "(1)"
+       line 261, "pan.___", state 1054, "(1)"
+       line 276, "pan.___", state 1079, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1092, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1101, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1117, "(1)"
+       line 253, "pan.___", state 1125, "(1)"
+       line 257, "pan.___", state 1137, "(1)"
+       line 261, "pan.___", state 1145, "(1)"
+       line 1237, "pan.___", state 1160, "-end-"
+       (77 of 1160 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 229 seconds
+pan: rate  14976.68 states/second
+pan: avg transition delay 2.826e-06 usec
+cp .input.spin urcu_free_no_wmb.spin.input
+cp .input.spin.trail urcu_free_no_wmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.spin.input
new file mode 100644 (file)
index 0000000..dc68ded
--- /dev/null
@@ -0,0 +1,1273 @@
+#define NO_WMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
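+/*
+ * The low bits (RCU_GP_CTR_NEST_MASK) of a reader's urcu_active_readers entry
+ * count read-side nesting; RCU_GP_CTR_BIT tracks the grace-period phase
+ * (parity) copied from urcu_gp_ctr on the outermost rcu_read_lock().
+ */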
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
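+/*
+ * Illustrative sketch of the token scheme (hypothetical names "flow",
+ * STAGE_A and STAGE_B; not part of the verified model).  STAGE_B may only
+ * fire once STAGE_A has produced its token, and each stage fires at most
+ * once, whatever order the non-deterministic selection tries the branches in:
+ *
+ *     do
+ *     :: CONSUME_TOKENS(flow, 0, STAGE_A) ->          // no input tokens needed
+ *             PRODUCE_TOKENS(flow, STAGE_A);
+ *     :: CONSUME_TOKENS(flow, STAGE_A, STAGE_B) ->    // depends on STAGE_A
+ *             PRODUCE_TOKENS(flow, STAGE_B);
+ *     :: CONSUME_TOKENS(flow, STAGE_A | STAGE_B, 0) -> break;
+ *     od;
+ */
+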
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
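+/*
+ * A minimal illustration of the three data dependency kinds above (plain
+ * pseudo-code, not part of the model):
+ *
+ *     a = b + c;      (S1)
+ *     d = a * 2;      (S2) RAW on 'a' : S2 reads what S1 wrote
+ *     b = 7;          (S3) WAR on 'b' : S3 overwrites what S1 read
+ *     a = 0;          (S4) WAW on 'a' : S4 overwrites what S1 wrote
+ */
+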
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May propagate a dirty cache line to memory (making the update visible to
+ * other caches), or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
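+/*
+ * Worked example of the cache model (hypothetical variable 'foo', not part
+ * of the model):
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);    // creates mem_foo, cached_foo, cache_dirty_foo
+ *
+ *     // writer (pid 1):
+ *     WRITE_CACHED_VAR(foo, 1);         // cached_foo.val[1] = 1, dirty bit 1 set
+ *     CACHE_WRITE_TO_MEM(foo, 1);       // mem_foo = 1, dirty bit 1 cleared
+ *
+ *     // reader (pid 0):
+ *     CACHE_READ_FROM_MEM(foo, 0);      // cached_foo.val[0] = mem_foo (line not dirty)
+ *     assert(READ_CACHED_VAR(foo) == 1); // pid 0 now sees the writer's store
+ *
+ * smp_wmb()/smp_rmb() below apply exactly these flush/refill steps to the
+ * model's shared variables.
+ */
+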
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note! Currently only one reader. */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
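+
+/*
+ * rcu_ptr is modeled as an index into the rcu_data[] slab rather than as a
+ * real pointer; the ptr_read_ and data_read_ arrays record what each reader
+ * observed on its first and second (unrolled) accesses, and are what the
+ * read_poison property above inspects.
+ */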
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops : RAW dependency from
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * to
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loop iterations.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
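+
+/*
+ * A minimal sketch of how these token bits drive the writer's dataflow,
+ * assuming the PRODUCE_TOKENS/CONSUME_TOKENS/CLEAR_TOKENS helpers defined
+ * earlier in this model act as plain set/test/clear operations on the
+ * per-process token word. For instance, the step
+ *
+ *   :: CONSUME_TOKENS(proc_urcu_writer, WRITE_DATA, WRITE_PROC_WMB) ->
+ *           smp_wmb(i);
+ *           PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+ *
+ * is enabled only once the WRITE_DATA token has been produced and while its
+ * own WRITE_PROC_WMB token is still absent, so the if/fi below may pick the
+ * writer's instructions in any order compatible with the declared
+ * dependencies, modeling out-of-order execution.
+ */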
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on
+                                * the global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, which was performed as a prefetch.
+                        * Note that all instructions with side-effects
+                        * depending on WRITE_PROC_SECOND_READ_GP should also
+                        * depend on completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the
+                * validation checks whether the data entry read is poisoned,
+                * it is OK if we do not check for "late arriving" memory
+                * poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops forever, let the writer also busy-loop with
+        * a progress label here so that, under weak fairness, we can test the
+        * writer's progress (see the note after the loop below).
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
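+       /*
+        * Note on exercising the progress labels above (an assumption about
+        * the usual Spin workflow, not something enforced by this file):
+        * labels prefixed with "progress" mark progress states, and pan's
+        * non-progress-cycle search (pan compiled with -DNP, run with -l and
+        * optionally -f for weak fairness) reports any infinite execution
+        * that eventually stops visiting them. That is how writer/reader
+        * progress is checked without blocking the model itself.
+        */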
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Placed after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
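+
+/*
+ * How the model is typically exercised (a sketch; these are the commands
+ * recorded in the *.log files added alongside this model, e.g. the
+ * single-flip log further below):
+ *
+ *   spin -a -X -N pan.ltl .input.spin
+ *   gcc -O2 -w -DHASH64 -o pan pan.c
+ *   ./pan -a -v -c1 -X -m10000000 -w20
+ */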
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.spin.input.trail b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_no_wmb.spin.input.trail
new file mode 100644 (file)
index 0000000..aca9da8
--- /dev/null
@@ -0,0 +1,1298 @@
+-2:3:-2
+-4:-4:-4
+1:0:3995
+2:3:3915
+3:3:3918
+4:3:3918
+5:3:3921
+6:3:3929
+7:3:3929
+8:3:3932
+9:3:3938
+10:3:3942
+11:3:3942
+12:3:3945
+13:3:3955
+14:3:3963
+15:3:3963
+16:3:3966
+17:3:3972
+18:3:3976
+19:3:3976
+20:3:3979
+21:3:3985
+22:3:3989
+23:3:3990
+24:0:3995
+25:3:3992
+26:0:3995
+27:2:2757
+28:0:3995
+29:2:2763
+30:0:3995
+31:2:2764
+32:0:3995
+33:2:2766
+34:0:3995
+35:2:2767
+36:0:3995
+37:2:2768
+38:0:3995
+39:2:2769
+40:2:2770
+41:2:2774
+42:2:2775
+43:2:2783
+44:2:2784
+45:2:2788
+46:2:2789
+47:2:2797
+48:2:2802
+49:2:2806
+50:2:2807
+51:2:2815
+52:2:2816
+53:2:2820
+54:2:2821
+55:2:2815
+56:2:2816
+57:2:2820
+58:2:2821
+59:2:2829
+60:2:2834
+61:2:2841
+62:2:2842
+63:2:2849
+64:2:2854
+65:2:2861
+66:2:2862
+67:2:2861
+68:2:2862
+69:2:2869
+70:2:2879
+71:0:3995
+72:2:2768
+73:0:3995
+74:2:2931
+75:2:2932
+76:2:2933
+77:0:3995
+78:2:2768
+79:0:3995
+80:2:2938
+81:0:3995
+82:2:3552
+83:2:3553
+84:2:3557
+85:2:3561
+86:2:3562
+87:2:3566
+88:2:3571
+89:2:3579
+90:2:3583
+91:2:3584
+92:2:3579
+93:2:3580
+94:2:3588
+95:2:3595
+96:2:3602
+97:2:3603
+98:2:3610
+99:2:3615
+100:2:3622
+101:2:3623
+102:2:3622
+103:2:3623
+104:2:3630
+105:2:3634
+106:0:3995
+107:2:2940
+108:2:3533
+109:0:3995
+110:2:2768
+111:0:3995
+112:2:2941
+113:0:3995
+114:2:2768
+115:0:3995
+116:2:2944
+117:2:2945
+118:2:2949
+119:2:2950
+120:2:2958
+121:2:2959
+122:2:2963
+123:2:2964
+124:2:2972
+125:2:2977
+126:2:2981
+127:2:2982
+128:2:2990
+129:2:2991
+130:2:2995
+131:2:2996
+132:2:2990
+133:2:2991
+134:2:2995
+135:2:2996
+136:2:3004
+137:2:3009
+138:2:3016
+139:2:3017
+140:2:3024
+141:2:3029
+142:2:3036
+143:2:3037
+144:2:3036
+145:2:3037
+146:2:3044
+147:2:3053
+148:0:3995
+149:2:2768
+150:0:3995
+151:2:3057
+152:2:3058
+153:2:3059
+154:2:3071
+155:2:3072
+156:2:3076
+157:2:3077
+158:2:3085
+159:2:3090
+160:2:3094
+161:2:3095
+162:2:3103
+163:2:3104
+164:2:3108
+165:2:3109
+166:2:3103
+167:2:3104
+168:2:3108
+169:2:3109
+170:2:3117
+171:2:3122
+172:2:3129
+173:2:3130
+174:2:3137
+175:2:3142
+176:2:3149
+177:2:3150
+178:2:3149
+179:2:3150
+180:2:3157
+181:2:3170
+182:2:3171
+183:0:3995
+184:2:2768
+185:0:3995
+186:2:3178
+187:2:3179
+188:2:3183
+189:2:3184
+190:2:3192
+191:2:3193
+192:2:3197
+193:2:3198
+194:2:3206
+195:2:3211
+196:2:3215
+197:2:3216
+198:2:3224
+199:2:3225
+200:2:3229
+201:2:3230
+202:2:3224
+203:2:3225
+204:2:3229
+205:2:3230
+206:2:3238
+207:2:3243
+208:2:3250
+209:2:3251
+210:2:3258
+211:2:3263
+212:2:3270
+213:2:3271
+214:2:3270
+215:2:3271
+216:2:3278
+217:0:3995
+218:2:2768
+219:0:3995
+220:2:3289
+221:2:3290
+222:2:3294
+223:2:3295
+224:2:3303
+225:2:3304
+226:2:3308
+227:2:3309
+228:2:3317
+229:2:3322
+230:2:3326
+231:2:3327
+232:2:3335
+233:2:3336
+234:2:3340
+235:2:3341
+236:2:3335
+237:2:3336
+238:2:3340
+239:2:3341
+240:2:3349
+241:2:3354
+242:2:3361
+243:2:3362
+244:2:3369
+245:2:3374
+246:2:3381
+247:2:3382
+248:2:3381
+249:2:3382
+250:2:3389
+251:2:3398
+252:0:3995
+253:2:2768
+254:0:3995
+255:2:3402
+256:2:3403
+257:2:3404
+258:2:3416
+259:2:3417
+260:2:3421
+261:2:3422
+262:2:3430
+263:2:3435
+264:2:3439
+265:2:3440
+266:2:3448
+267:2:3449
+268:2:3453
+269:2:3454
+270:2:3448
+271:2:3449
+272:2:3453
+273:2:3454
+274:2:3462
+275:2:3467
+276:2:3474
+277:2:3475
+278:2:3482
+279:2:3487
+280:2:3494
+281:2:3495
+282:2:3494
+283:2:3495
+284:2:3502
+285:2:3514
+286:2:3515
+287:0:3995
+288:2:2768
+289:0:3995
+290:2:3521
+291:0:3995
+292:2:3825
+293:2:3826
+294:2:3830
+295:2:3834
+296:2:3835
+297:2:3839
+298:2:3847
+299:2:3848
+300:2:3852
+301:2:3856
+302:2:3857
+303:2:3852
+304:2:3856
+305:2:3857
+306:2:3861
+307:2:3868
+308:2:3875
+309:2:3876
+310:2:3883
+311:2:3888
+312:2:3895
+313:2:3896
+314:2:3895
+315:2:3896
+316:2:3903
+317:2:3907
+318:0:3995
+319:2:3523
+320:2:3533
+321:0:3995
+322:2:2768
+323:0:3995
+324:2:3524
+325:2:3525
+326:0:3995
+327:2:2768
+328:0:3995
+329:2:3529
+330:0:3995
+331:2:3537
+332:0:3995
+333:2:2764
+334:0:3995
+335:2:2766
+336:0:3995
+337:2:2767
+338:0:3995
+339:2:2768
+340:0:3995
+341:2:2931
+342:2:2932
+343:2:2933
+344:0:3995
+345:2:2768
+346:0:3995
+347:2:2769
+348:2:2770
+349:2:2774
+350:2:2775
+351:2:2783
+352:2:2784
+353:2:2788
+354:2:2789
+355:2:2797
+356:2:2802
+357:2:2803
+358:2:2815
+359:2:2816
+360:2:2817
+361:2:2815
+362:2:2816
+363:2:2820
+364:2:2821
+365:2:2829
+366:2:2834
+367:2:2841
+368:2:2842
+369:2:2849
+370:2:2854
+371:2:2861
+372:2:2862
+373:2:2861
+374:2:2862
+375:2:2869
+376:2:2879
+377:0:3995
+378:2:2768
+379:0:3995
+380:2:2938
+381:0:3995
+382:2:3552
+383:2:3553
+384:2:3557
+385:2:3561
+386:2:3562
+387:2:3566
+388:2:3574
+389:2:3575
+390:2:3579
+391:2:3580
+392:2:3579
+393:2:3583
+394:2:3584
+395:2:3588
+396:2:3595
+397:2:3602
+398:2:3603
+399:2:3610
+400:2:3615
+401:2:3622
+402:2:3623
+403:2:3622
+404:2:3623
+405:2:3630
+406:2:3634
+407:0:3995
+408:2:2940
+409:2:3533
+410:0:3995
+411:2:2768
+412:0:3995
+413:2:2941
+414:0:3995
+415:2:2768
+416:0:3995
+417:2:2944
+418:2:2945
+419:2:2949
+420:2:2950
+421:2:2958
+422:2:2959
+423:2:2963
+424:2:2964
+425:2:2972
+426:2:2977
+427:2:2981
+428:2:2982
+429:2:2990
+430:2:2991
+431:2:2995
+432:2:2996
+433:2:2990
+434:2:2991
+435:2:2995
+436:2:2996
+437:2:3004
+438:2:3009
+439:2:3016
+440:2:3017
+441:2:3024
+442:2:3029
+443:2:3036
+444:2:3037
+445:2:3036
+446:2:3037
+447:2:3044
+448:2:3053
+449:0:3995
+450:2:2768
+451:0:3995
+452:2:3057
+453:2:3058
+454:2:3059
+455:2:3071
+456:2:3072
+457:2:3076
+458:2:3077
+459:2:3085
+460:2:3090
+461:2:3094
+462:2:3095
+463:2:3103
+464:2:3104
+465:2:3108
+466:2:3109
+467:2:3103
+468:2:3104
+469:2:3108
+470:2:3109
+471:2:3117
+472:2:3122
+473:2:3129
+474:2:3130
+475:2:3137
+476:2:3142
+477:2:3149
+478:2:3150
+479:2:3149
+480:2:3150
+481:2:3157
+482:2:3170
+483:2:3171
+484:0:3995
+485:2:2768
+486:0:3995
+487:2:3178
+488:2:3179
+489:2:3183
+490:2:3184
+491:2:3192
+492:2:3193
+493:2:3197
+494:2:3198
+495:2:3206
+496:2:3211
+497:2:3215
+498:2:3216
+499:2:3224
+500:2:3225
+501:2:3229
+502:2:3230
+503:2:3224
+504:2:3225
+505:2:3229
+506:2:3230
+507:2:3238
+508:2:3243
+509:2:3250
+510:2:3251
+511:2:3258
+512:2:3263
+513:2:3270
+514:2:3271
+515:2:3270
+516:2:3271
+517:2:3278
+518:0:3995
+519:2:2768
+520:0:3995
+521:2:3289
+522:2:3290
+523:2:3294
+524:2:3295
+525:2:3303
+526:2:3304
+527:2:3308
+528:2:3309
+529:2:3317
+530:2:3322
+531:2:3326
+532:2:3327
+533:2:3335
+534:2:3336
+535:2:3340
+536:2:3341
+537:2:3335
+538:2:3336
+539:2:3340
+540:2:3341
+541:2:3349
+542:2:3354
+543:2:3361
+544:2:3362
+545:2:3369
+546:2:3374
+547:2:3381
+548:2:3382
+549:2:3381
+550:2:3382
+551:2:3389
+552:2:3398
+553:0:3995
+554:2:2768
+555:0:3995
+556:2:3402
+557:2:3403
+558:2:3404
+559:2:3416
+560:2:3417
+561:2:3421
+562:2:3422
+563:2:3430
+564:2:3435
+565:2:3439
+566:2:3440
+567:2:3448
+568:2:3449
+569:2:3453
+570:2:3454
+571:2:3448
+572:2:3449
+573:2:3453
+574:2:3454
+575:2:3462
+576:2:3467
+577:2:3474
+578:2:3475
+579:2:3482
+580:2:3487
+581:2:3494
+582:2:3495
+583:2:3494
+584:2:3495
+585:2:3502
+586:2:3514
+587:2:3515
+588:0:3995
+589:2:2768
+590:0:3995
+591:2:3521
+592:0:3995
+593:2:3825
+594:2:3826
+595:2:3830
+596:2:3834
+597:2:3835
+598:2:3839
+599:2:3847
+600:2:3848
+601:2:3852
+602:2:3856
+603:2:3857
+604:2:3852
+605:2:3856
+606:2:3857
+607:2:3861
+608:2:3868
+609:2:3875
+610:2:3876
+611:2:3883
+612:2:3888
+613:2:3895
+614:2:3896
+615:2:3895
+616:2:3896
+617:2:3903
+618:2:3907
+619:0:3995
+620:2:3523
+621:2:3533
+622:0:3995
+623:2:2768
+624:0:3995
+625:2:3524
+626:2:3525
+627:0:3995
+628:2:2768
+629:0:3995
+630:2:3529
+631:0:3995
+632:2:3537
+633:0:3995
+634:2:2764
+635:0:3995
+636:2:2766
+637:0:3995
+638:2:2767
+639:0:3995
+640:2:2768
+641:0:3995
+642:2:2769
+643:2:2770
+644:2:2774
+645:2:2775
+646:2:2783
+647:2:2784
+648:2:2788
+649:2:2789
+650:2:2797
+651:2:2802
+652:2:2806
+653:2:2807
+654:2:2815
+655:2:2816
+656:2:2820
+657:2:2821
+658:2:2815
+659:2:2816
+660:2:2817
+661:2:2829
+662:2:2834
+663:2:2841
+664:2:2842
+665:2:2849
+666:2:2854
+667:2:2861
+668:2:2862
+669:2:2861
+670:2:2862
+671:2:2869
+672:2:2879
+673:0:3995
+674:2:2768
+675:0:3995
+676:2:2931
+677:2:2932
+678:2:2933
+679:0:3995
+680:2:2768
+681:0:3995
+682:2:2938
+683:0:3995
+684:1:2
+685:0:3995
+686:1:8
+687:0:3995
+688:1:9
+689:0:3995
+690:1:10
+691:0:3995
+692:1:11
+693:0:3995
+694:1:12
+695:1:13
+696:1:17
+697:1:18
+698:1:26
+699:1:27
+700:1:31
+701:1:32
+702:1:40
+703:1:45
+704:1:49
+705:1:50
+706:1:58
+707:1:59
+708:1:63
+709:1:64
+710:1:58
+711:1:59
+712:1:63
+713:1:64
+714:1:72
+715:1:77
+716:1:84
+717:1:85
+718:1:92
+719:1:97
+720:1:104
+721:1:105
+722:1:104
+723:1:105
+724:1:112
+725:0:3995
+726:1:11
+727:0:3995
+728:1:123
+729:1:124
+730:0:3995
+731:1:11
+732:0:3995
+733:1:130
+734:1:131
+735:1:135
+736:1:136
+737:1:144
+738:1:145
+739:1:149
+740:1:150
+741:1:158
+742:1:163
+743:1:167
+744:1:168
+745:1:176
+746:1:177
+747:1:181
+748:1:182
+749:1:176
+750:1:177
+751:1:181
+752:1:182
+753:1:190
+754:1:195
+755:1:202
+756:1:203
+757:1:210
+758:1:215
+759:1:222
+760:1:223
+761:1:222
+762:1:223
+763:1:230
+764:0:3995
+765:1:11
+766:0:3995
+767:1:241
+768:1:242
+769:1:246
+770:1:247
+771:1:255
+772:1:256
+773:1:260
+774:1:261
+775:1:269
+776:1:274
+777:1:278
+778:1:279
+779:1:287
+780:1:288
+781:1:292
+782:1:293
+783:1:287
+784:1:288
+785:1:292
+786:1:293
+787:1:301
+788:1:306
+789:1:313
+790:1:314
+791:1:321
+792:1:326
+793:1:333
+794:1:334
+795:1:333
+796:1:334
+797:1:341
+798:1:350
+799:0:3995
+800:1:11
+801:0:3995
+802:1:468
+803:1:472
+804:1:473
+805:1:477
+806:1:478
+807:1:486
+808:1:494
+809:1:495
+810:1:499
+811:1:503
+812:1:504
+813:1:499
+814:1:503
+815:1:504
+816:1:508
+817:1:515
+818:1:522
+819:1:523
+820:1:530
+821:1:535
+822:1:542
+823:1:543
+824:1:542
+825:1:543
+826:1:550
+827:0:3995
+828:1:11
+829:0:3995
+830:1:560
+831:1:561
+832:1:565
+833:1:566
+834:1:574
+835:1:575
+836:1:579
+837:1:580
+838:1:588
+839:1:593
+840:1:597
+841:1:598
+842:1:606
+843:1:607
+844:1:611
+845:1:612
+846:1:606
+847:1:607
+848:1:611
+849:1:612
+850:1:620
+851:1:625
+852:1:632
+853:1:633
+854:1:640
+855:1:645
+856:1:652
+857:1:653
+858:1:652
+859:1:653
+860:1:660
+861:0:3995
+862:1:11
+863:0:3995
+864:1:671
+865:1:674
+866:1:675
+867:0:3995
+868:1:11
+869:0:3995
+870:1:678
+871:1:679
+872:1:683
+873:1:684
+874:1:692
+875:1:693
+876:1:697
+877:1:698
+878:1:706
+879:1:711
+880:1:715
+881:1:716
+882:1:724
+883:1:725
+884:1:729
+885:1:730
+886:1:724
+887:1:725
+888:1:729
+889:1:730
+890:1:738
+891:1:743
+892:1:750
+893:1:751
+894:1:758
+895:1:763
+896:1:770
+897:1:771
+898:1:770
+899:1:771
+900:1:778
+901:0:3995
+902:1:11
+903:0:3995
+904:1:902
+905:1:903
+906:1:907
+907:1:908
+908:1:916
+909:1:917
+910:1:921
+911:1:922
+912:1:930
+913:1:935
+914:1:939
+915:1:940
+916:1:948
+917:1:949
+918:1:953
+919:1:954
+920:1:948
+921:1:949
+922:1:953
+923:1:954
+924:1:962
+925:1:967
+926:1:974
+927:1:975
+928:1:982
+929:1:987
+930:1:994
+931:1:995
+932:1:994
+933:1:995
+934:1:1002
+935:1:1011
+936:1:1015
+937:0:3995
+938:1:11
+939:0:3995
+940:1:1016
+941:1:1017
+942:1:1021
+943:1:1022
+944:1:1030
+945:1:1031
+946:1:1032
+947:1:1044
+948:1:1049
+949:1:1053
+950:1:1054
+951:1:1062
+952:1:1063
+953:1:1067
+954:1:1068
+955:1:1062
+956:1:1063
+957:1:1067
+958:1:1068
+959:1:1076
+960:1:1081
+961:1:1088
+962:1:1089
+963:1:1096
+964:1:1101
+965:1:1108
+966:1:1109
+967:1:1108
+968:1:1109
+969:1:1116
+970:0:3995
+971:1:11
+972:0:3995
+973:1:1127
+974:0:3995
+975:1:2663
+976:1:2670
+977:1:2671
+978:1:2678
+979:1:2683
+980:1:2690
+981:1:2691
+982:1:2690
+983:1:2691
+984:1:2698
+985:1:2702
+986:0:3995
+987:2:3552
+988:2:3553
+989:2:3557
+990:2:3561
+991:2:3562
+992:2:3566
+993:2:3571
+994:2:3579
+995:2:3583
+996:2:3584
+997:2:3579
+998:2:3580
+999:2:3588
+1000:2:3595
+1001:2:3602
+1002:2:3603
+1003:2:3610
+1004:2:3615
+1005:2:3622
+1006:2:3623
+1007:2:3622
+1008:2:3623
+1009:2:3630
+1010:2:3634
+1011:0:3995
+1012:2:2940
+1013:2:3533
+1014:0:3995
+1015:2:2768
+1016:0:3995
+1017:2:2941
+1018:0:3995
+1019:2:2768
+1020:0:3995
+1021:2:2944
+1022:2:2945
+1023:2:2949
+1024:2:2950
+1025:2:2958
+1026:2:2959
+1027:2:2963
+1028:2:2964
+1029:2:2972
+1030:2:2977
+1031:2:2981
+1032:2:2982
+1033:2:2990
+1034:2:2991
+1035:2:2995
+1036:2:2996
+1037:2:2990
+1038:2:2991
+1039:2:2995
+1040:2:2996
+1041:2:3004
+1042:2:3009
+1043:2:3016
+1044:2:3017
+1045:2:3024
+1046:2:3029
+1047:2:3036
+1048:2:3037
+1049:2:3036
+1050:2:3037
+1051:2:3044
+1052:2:3053
+1053:0:3995
+1054:2:2768
+1055:0:3995
+1056:2:3057
+1057:2:3058
+1058:2:3059
+1059:2:3071
+1060:2:3072
+1061:2:3076
+1062:2:3077
+1063:2:3085
+1064:2:3090
+1065:2:3094
+1066:2:3095
+1067:2:3103
+1068:2:3104
+1069:2:3108
+1070:2:3109
+1071:2:3103
+1072:2:3104
+1073:2:3108
+1074:2:3109
+1075:2:3117
+1076:2:3122
+1077:2:3129
+1078:2:3130
+1079:2:3137
+1080:2:3142
+1081:2:3149
+1082:2:3150
+1083:2:3149
+1084:2:3150
+1085:2:3157
+1086:2:3168
+1087:0:3995
+1088:2:2768
+1089:0:3995
+1090:2:3174
+1091:0:3995
+1092:2:3643
+1093:2:3644
+1094:2:3648
+1095:2:3652
+1096:2:3653
+1097:2:3657
+1098:2:3665
+1099:2:3666
+1100:2:3670
+1101:2:3674
+1102:2:3675
+1103:2:3670
+1104:2:3674
+1105:2:3675
+1106:2:3679
+1107:2:3686
+1108:2:3693
+1109:2:3694
+1110:2:3701
+1111:2:3706
+1112:2:3713
+1113:2:3714
+1114:2:3713
+1115:2:3714
+1116:2:3721
+1117:2:3725
+1118:0:3995
+1119:2:3176
+1120:2:3177
+1121:0:3995
+1122:2:2768
+1123:0:3995
+1124:2:3178
+1125:2:3179
+1126:2:3183
+1127:2:3184
+1128:2:3192
+1129:2:3193
+1130:2:3197
+1131:2:3198
+1132:2:3206
+1133:2:3211
+1134:2:3215
+1135:2:3216
+1136:2:3224
+1137:2:3225
+1138:2:3229
+1139:2:3230
+1140:2:3224
+1141:2:3225
+1142:2:3229
+1143:2:3230
+1144:2:3238
+1145:2:3243
+1146:2:3250
+1147:2:3251
+1148:2:3258
+1149:2:3263
+1150:2:3270
+1151:2:3271
+1152:2:3270
+1153:2:3271
+1154:2:3278
+1155:0:3995
+1156:2:2768
+1157:0:3995
+1158:2:3057
+1159:2:3058
+1160:2:3062
+1161:2:3063
+1162:2:3071
+1163:2:3072
+1164:2:3076
+1165:2:3077
+1166:2:3085
+1167:2:3090
+1168:2:3094
+1169:2:3095
+1170:2:3103
+1171:2:3104
+1172:2:3108
+1173:2:3109
+1174:2:3103
+1175:2:3104
+1176:2:3108
+1177:2:3109
+1178:2:3117
+1179:2:3122
+1180:2:3129
+1181:2:3130
+1182:2:3137
+1183:2:3142
+1184:2:3149
+1185:2:3150
+1186:2:3149
+1187:2:3150
+1188:2:3157
+1189:2:3168
+1190:0:3995
+1191:2:2768
+1192:0:3995
+1193:2:3174
+1194:0:3995
+1195:2:3643
+1196:2:3644
+1197:2:3648
+1198:2:3652
+1199:2:3653
+1200:2:3657
+1201:2:3665
+1202:2:3666
+1203:2:3670
+1204:2:3674
+1205:2:3675
+1206:2:3670
+1207:2:3674
+1208:2:3675
+1209:2:3679
+1210:2:3686
+1211:2:3693
+1212:2:3694
+1213:2:3701
+1214:2:3706
+1215:2:3713
+1216:2:3714
+1217:2:3713
+1218:2:3714
+1219:2:3721
+1220:2:3725
+1221:0:3995
+1222:2:3176
+1223:2:3177
+1224:0:3995
+1225:2:2768
+1226:0:3995
+1227:2:3057
+1228:2:3058
+1229:2:3062
+1230:2:3063
+1231:2:3071
+1232:2:3072
+1233:2:3076
+1234:2:3077
+1235:2:3085
+1236:2:3090
+1237:2:3094
+1238:2:3095
+1239:2:3103
+1240:2:3104
+1241:2:3108
+1242:2:3109
+1243:2:3103
+1244:2:3104
+1245:2:3108
+1246:2:3109
+1247:2:3117
+1248:2:3122
+1249:2:3129
+1250:2:3130
+1251:2:3137
+1252:2:3142
+1253:2:3149
+1254:2:3150
+1255:2:3149
+1256:2:3150
+1257:2:3157
+1258:2:3168
+1259:0:3995
+1260:2:2768
+1261:0:3995
+1262:2:3174
+1263:0:3995
+1264:2:3643
+1265:2:3644
+1266:2:3648
+1267:2:3652
+1268:2:3653
+1269:2:3657
+1270:2:3665
+1271:2:3666
+1272:2:3670
+1273:2:3674
+1274:2:3675
+1275:2:3670
+1276:2:3674
+1277:2:3675
+1278:2:3679
+1279:2:3686
+1280:2:3693
+1281:2:3694
+1282:2:3701
+1283:2:3706
+1284:2:3713
+1285:2:3714
+1286:2:3713
+1287:2:3714
+1288:2:3721
+1289:2:3725
+1290:0:3995
+1291:1:1129
+1292:1:1130
+1293:0:3993
+1294:1:11
+1295:0:3999
+1296:1:2142
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.log
new file mode 100644 (file)
index 0000000..92cda73
--- /dev/null
@@ -0,0 +1,571 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_single_flip.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+Depth=    5162 States=    1e+06 Transitions= 1.79e+07 Memory=   550.334        t=     49 R=   2e+04
+pan: claim violated! (at depth 1059)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5162, errors: 1
+  1708535 states, stored
+ 29763099 states, matched
+ 31471634 transitions (= stored+matched)
+4.8935629e+08 atomic steps
+hash conflicts:  14510834 (resolved)
+
+Stats on memory usage (in Megabytes):
+  189.009      equivalent memory usage for states (stored*(State-vector + overhead))
+  144.122      actual memory usage for states (compression: 76.25%)
+               state-vector as stored = 60 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  609.807      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 81, "(1)"
+       line 257, "pan.___", state 101, "(1)"
+       line 261, "pan.___", state 109, "(1)"
+       line 597, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 199, "(1)"
+       line 257, "pan.___", state 219, "(1)"
+       line 261, "pan.___", state 227, "(1)"
+       line 411, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 310, "(1)"
+       line 257, "pan.___", state 330, "(1)"
+       line 261, "pan.___", state 338, "(1)"
+       line 411, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 361, "(1)"
+       line 411, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 362, "else"
+       line 411, "pan.___", state 365, "(1)"
+       line 415, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 375, "(1)"
+       line 415, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 376, "else"
+       line 415, "pan.___", state 379, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 413, "pan.___", state 385, "((i<1))"
+       line 413, "pan.___", state 385, "((i>=1))"
+       line 420, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 393, "(1)"
+       line 420, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 394, "else"
+       line 420, "pan.___", state 397, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 424, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 407, "(1)"
+       line 424, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 408, "else"
+       line 424, "pan.___", state 411, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 422, "pan.___", state 417, "((i<2))"
+       line 422, "pan.___", state 417, "((i>=2))"
+       line 249, "pan.___", state 423, "(1)"
+       line 253, "pan.___", state 431, "(1)"
+       line 253, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 432, "else"
+       line 251, "pan.___", state 437, "((i<1))"
+       line 251, "pan.___", state 437, "((i>=1))"
+       line 257, "pan.___", state 443, "(1)"
+       line 257, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 444, "else"
+       line 261, "pan.___", state 451, "(1)"
+       line 261, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 452, "else"
+       line 259, "pan.___", state 457, "((i<2))"
+       line 259, "pan.___", state 457, "((i>=2))"
+       line 266, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 461, "else"
+       line 431, "pan.___", state 463, "(1)"
+       line 431, "pan.___", state 463, "(1)"
+       line 597, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 468, "(1)"
+       line 272, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 483, "(1)"
+       line 280, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 519, "(1)"
+       line 253, "pan.___", state 527, "(1)"
+       line 257, "pan.___", state 539, "(1)"
+       line 261, "pan.___", state 547, "(1)"
+       line 411, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 629, "(1)"
+       line 253, "pan.___", state 637, "(1)"
+       line 257, "pan.___", state 649, "(1)"
+       line 261, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 747, "(1)"
+       line 257, "pan.___", state 767, "(1)"
+       line 261, "pan.___", state 775, "(1)"
+       line 411, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 796, "(1)"
+       line 411, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 797, "else"
+       line 411, "pan.___", state 800, "(1)"
+       line 415, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 810, "(1)"
+       line 415, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 811, "else"
+       line 415, "pan.___", state 814, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 413, "pan.___", state 820, "((i<1))"
+       line 413, "pan.___", state 820, "((i>=1))"
+       line 420, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 828, "(1)"
+       line 420, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 829, "else"
+       line 420, "pan.___", state 832, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 424, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 842, "(1)"
+       line 424, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 843, "else"
+       line 424, "pan.___", state 846, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 422, "pan.___", state 852, "((i<2))"
+       line 422, "pan.___", state 852, "((i>=2))"
+       line 249, "pan.___", state 858, "(1)"
+       line 253, "pan.___", state 866, "(1)"
+       line 253, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 867, "else"
+       line 251, "pan.___", state 872, "((i<1))"
+       line 251, "pan.___", state 872, "((i>=1))"
+       line 257, "pan.___", state 878, "(1)"
+       line 257, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 879, "else"
+       line 261, "pan.___", state 886, "(1)"
+       line 261, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 887, "else"
+       line 259, "pan.___", state 892, "((i<2))"
+       line 259, "pan.___", state 892, "((i>=2))"
+       line 266, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 896, "else"
+       line 431, "pan.___", state 898, "(1)"
+       line 431, "pan.___", state 898, "(1)"
+       line 605, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 971, "(1)"
+       line 253, "pan.___", state 979, "(1)"
+       line 257, "pan.___", state 991, "(1)"
+       line 261, "pan.___", state 999, "(1)"
+       line 411, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1085, "(1)"
+       line 257, "pan.___", state 1105, "(1)"
+       line 261, "pan.___", state 1113, "(1)"
+       line 411, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1200, "(1)"
+       line 257, "pan.___", state 1220, "(1)"
+       line 261, "pan.___", state 1228, "(1)"
+       line 411, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1311, "(1)"
+       line 257, "pan.___", state 1331, "(1)"
+       line 261, "pan.___", state 1339, "(1)"
+       line 272, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1407, "(1)"
+       line 253, "pan.___", state 1415, "(1)"
+       line 257, "pan.___", state 1427, "(1)"
+       line 261, "pan.___", state 1435, "(1)"
+       line 411, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1517, "(1)"
+       line 253, "pan.___", state 1525, "(1)"
+       line 257, "pan.___", state 1537, "(1)"
+       line 261, "pan.___", state 1545, "(1)"
+       line 411, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1628, "(1)"
+       line 253, "pan.___", state 1636, "(1)"
+       line 257, "pan.___", state 1648, "(1)"
+       line 261, "pan.___", state 1656, "(1)"
+       line 411, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1742, "(1)"
+       line 257, "pan.___", state 1762, "(1)"
+       line 261, "pan.___", state 1770, "(1)"
+       line 644, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1860, "(1)"
+       line 257, "pan.___", state 1880, "(1)"
+       line 261, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1971, "(1)"
+       line 257, "pan.___", state 1991, "(1)"
+       line 261, "pan.___", state 1999, "(1)"
+       line 411, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2022, "(1)"
+       line 411, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2023, "else"
+       line 411, "pan.___", state 2026, "(1)"
+       line 415, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2036, "(1)"
+       line 415, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2037, "else"
+       line 415, "pan.___", state 2040, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 413, "pan.___", state 2046, "((i<1))"
+       line 413, "pan.___", state 2046, "((i>=1))"
+       line 420, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2054, "(1)"
+       line 420, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2055, "else"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 424, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2068, "(1)"
+       line 424, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2069, "else"
+       line 424, "pan.___", state 2072, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 422, "pan.___", state 2078, "((i<2))"
+       line 422, "pan.___", state 2078, "((i>=2))"
+       line 249, "pan.___", state 2084, "(1)"
+       line 253, "pan.___", state 2092, "(1)"
+       line 253, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2093, "else"
+       line 251, "pan.___", state 2098, "((i<1))"
+       line 251, "pan.___", state 2098, "((i>=1))"
+       line 257, "pan.___", state 2104, "(1)"
+       line 257, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2105, "else"
+       line 261, "pan.___", state 2112, "(1)"
+       line 261, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2113, "else"
+       line 259, "pan.___", state 2118, "((i<2))"
+       line 259, "pan.___", state 2118, "((i>=2))"
+       line 266, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2122, "else"
+       line 431, "pan.___", state 2124, "(1)"
+       line 431, "pan.___", state 2124, "(1)"
+       line 644, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2129, "(1)"
+       line 272, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2180, "(1)"
+       line 253, "pan.___", state 2188, "(1)"
+       line 257, "pan.___", state 2200, "(1)"
+       line 261, "pan.___", state 2208, "(1)"
+       line 411, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2290, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2310, "(1)"
+       line 261, "pan.___", state 2318, "(1)"
+       line 272, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2387, "(1)"
+       line 253, "pan.___", state 2395, "(1)"
+       line 257, "pan.___", state 2407, "(1)"
+       line 261, "pan.___", state 2415, "(1)"
+       line 411, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2497, "(1)"
+       line 253, "pan.___", state 2505, "(1)"
+       line 257, "pan.___", state 2517, "(1)"
+       line 261, "pan.___", state 2525, "(1)"
+       line 411, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2608, "(1)"
+       line 253, "pan.___", state 2616, "(1)"
+       line 257, "pan.___", state 2628, "(1)"
+       line 261, "pan.___", state 2636, "(1)"
+       line 249, "pan.___", state 2667, "(1)"
+       line 257, "pan.___", state 2687, "(1)"
+       line 261, "pan.___", state 2695, "(1)"
+       line 249, "pan.___", state 2710, "(1)"
+       line 253, "pan.___", state 2718, "(1)"
+       line 257, "pan.___", state 2730, "(1)"
+       line 261, "pan.___", state 2738, "(1)"
+       line 898, "pan.___", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 22, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 36, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 54, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 86, "(1)"
+       line 253, "pan.___", state 94, "(1)"
+       line 257, "pan.___", state 106, "(1)"
+       line 272, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 144, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 157, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 197, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 211, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 229, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 243, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 261, "(1)"
+       line 253, "pan.___", state 269, "(1)"
+       line 257, "pan.___", state 281, "(1)"
+       line 261, "pan.___", state 289, "(1)"
+       line 415, "pan.___", state 324, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 342, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 356, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 382, "(1)"
+       line 257, "pan.___", state 394, "(1)"
+       line 261, "pan.___", state 402, "(1)"
+       line 411, "pan.___", state 430, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 444, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 462, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 476, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 494, "(1)"
+       line 253, "pan.___", state 502, "(1)"
+       line 257, "pan.___", state 514, "(1)"
+       line 261, "pan.___", state 522, "(1)"
+       line 411, "pan.___", state 541, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 543, "(1)"
+       line 411, "pan.___", state 544, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 544, "else"
+       line 411, "pan.___", state 547, "(1)"
+       line 415, "pan.___", state 555, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 557, "(1)"
+       line 415, "pan.___", state 558, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 558, "else"
+       line 415, "pan.___", state 561, "(1)"
+       line 415, "pan.___", state 562, "(1)"
+       line 415, "pan.___", state 562, "(1)"
+       line 413, "pan.___", state 567, "((i<1))"
+       line 413, "pan.___", state 567, "((i>=1))"
+       line 420, "pan.___", state 573, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 575, "(1)"
+       line 420, "pan.___", state 576, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 576, "else"
+       line 420, "pan.___", state 579, "(1)"
+       line 420, "pan.___", state 580, "(1)"
+       line 420, "pan.___", state 580, "(1)"
+       line 424, "pan.___", state 587, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 589, "(1)"
+       line 424, "pan.___", state 590, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 590, "else"
+       line 424, "pan.___", state 593, "(1)"
+       line 424, "pan.___", state 594, "(1)"
+       line 424, "pan.___", state 594, "(1)"
+       line 422, "pan.___", state 599, "((i<2))"
+       line 422, "pan.___", state 599, "((i>=2))"
+       line 249, "pan.___", state 605, "(1)"
+       line 253, "pan.___", state 613, "(1)"
+       line 253, "pan.___", state 614, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 614, "else"
+       line 251, "pan.___", state 619, "((i<1))"
+       line 251, "pan.___", state 619, "((i>=1))"
+       line 257, "pan.___", state 625, "(1)"
+       line 257, "pan.___", state 626, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 626, "else"
+       line 261, "pan.___", state 633, "(1)"
+       line 261, "pan.___", state 634, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 634, "else"
+       line 259, "pan.___", state 639, "((i<2))"
+       line 259, "pan.___", state 639, "((i>=2))"
+       line 266, "pan.___", state 643, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 643, "else"
+       line 431, "pan.___", state 645, "(1)"
+       line 431, "pan.___", state 645, "(1)"
+       line 1117, "pan.___", state 649, "_proc_urcu_writer = (_proc_urcu_writer|(1<<10))"
+       line 411, "pan.___", state 654, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 656, "(1)"
+       line 411, "pan.___", state 657, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 657, "else"
+       line 411, "pan.___", state 660, "(1)"
+       line 415, "pan.___", state 668, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 670, "(1)"
+       line 415, "pan.___", state 671, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 671, "else"
+       line 415, "pan.___", state 674, "(1)"
+       line 415, "pan.___", state 675, "(1)"
+       line 415, "pan.___", state 675, "(1)"
+       line 413, "pan.___", state 680, "((i<1))"
+       line 413, "pan.___", state 680, "((i>=1))"
+       line 420, "pan.___", state 686, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 688, "(1)"
+       line 420, "pan.___", state 689, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 689, "else"
+       line 420, "pan.___", state 692, "(1)"
+       line 420, "pan.___", state 693, "(1)"
+       line 420, "pan.___", state 693, "(1)"
+       line 424, "pan.___", state 700, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 702, "(1)"
+       line 424, "pan.___", state 703, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 703, "else"
+       line 424, "pan.___", state 706, "(1)"
+       line 424, "pan.___", state 707, "(1)"
+       line 424, "pan.___", state 707, "(1)"
+       line 422, "pan.___", state 712, "((i<2))"
+       line 422, "pan.___", state 712, "((i>=2))"
+       line 249, "pan.___", state 718, "(1)"
+       line 253, "pan.___", state 726, "(1)"
+       line 253, "pan.___", state 727, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 727, "else"
+       line 251, "pan.___", state 732, "((i<1))"
+       line 251, "pan.___", state 732, "((i>=1))"
+       line 257, "pan.___", state 738, "(1)"
+       line 257, "pan.___", state 739, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 739, "else"
+       line 261, "pan.___", state 746, "(1)"
+       line 261, "pan.___", state 747, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 747, "else"
+       line 259, "pan.___", state 752, "((i<2))"
+       line 259, "pan.___", state 752, "((i>=2))"
+       line 266, "pan.___", state 756, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 756, "else"
+       line 431, "pan.___", state 758, "(1)"
+       line 431, "pan.___", state 758, "(1)"
+       line 1133, "pan.___", state 763, "_proc_urcu_writer = (_proc_urcu_writer|(1<<11))"
+       line 1128, "pan.___", state 764, "(((tmp2&((1<<7)-1))&&((tmp2^0)&(1<<7))))"
+       line 1128, "pan.___", state 764, "else"
+       line 1153, "pan.___", state 768, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<12)|(1<<11))))"
+       line 272, "pan.___", state 799, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 823, "(1)"
+       line 284, "pan.___", state 830, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 846, "(1)"
+       line 253, "pan.___", state 854, "(1)"
+       line 257, "pan.___", state 866, "(1)"
+       line 261, "pan.___", state 874, "(1)"
+       line 276, "pan.___", state 899, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 912, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 921, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 937, "(1)"
+       line 253, "pan.___", state 945, "(1)"
+       line 257, "pan.___", state 957, "(1)"
+       line 261, "pan.___", state 965, "(1)"
+       line 272, "pan.___", state 981, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 272, "pan.___", state 983, "(1)"
+       line 276, "pan.___", state 990, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 992, "(1)"
+       line 276, "pan.___", state 993, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 276, "pan.___", state 993, "else"
+       line 274, "pan.___", state 998, "((i<1))"
+       line 274, "pan.___", state 998, "((i>=1))"
+       line 280, "pan.___", state 1003, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1005, "(1)"
+       line 280, "pan.___", state 1006, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 280, "pan.___", state 1006, "else"
+       line 284, "pan.___", state 1012, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1014, "(1)"
+       line 284, "pan.___", state 1015, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 284, "pan.___", state 1015, "else"
+       line 282, "pan.___", state 1020, "((i<2))"
+       line 282, "pan.___", state 1020, "((i>=2))"
+       line 249, "pan.___", state 1028, "(1)"
+       line 253, "pan.___", state 1036, "(1)"
+       line 253, "pan.___", state 1037, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 1037, "else"
+       line 251, "pan.___", state 1042, "((i<1))"
+       line 251, "pan.___", state 1042, "((i>=1))"
+       line 257, "pan.___", state 1048, "(1)"
+       line 257, "pan.___", state 1049, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 1049, "else"
+       line 261, "pan.___", state 1056, "(1)"
+       line 261, "pan.___", state 1057, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 1057, "else"
+       line 266, "pan.___", state 1066, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 1066, "else"
+       line 299, "pan.___", state 1068, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 299, "pan.___", state 1068, "else"
+       line 276, "pan.___", state 1081, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1094, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1103, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1119, "(1)"
+       line 253, "pan.___", state 1127, "(1)"
+       line 257, "pan.___", state 1139, "(1)"
+       line 261, "pan.___", state 1147, "(1)"
+       line 1237, "pan.___", state 1162, "-end-"
+       (145 of 1162 states)
+unreached in proctype :init:
+       line 1248, "pan.___", state 9, "((j<2))"
+       line 1248, "pan.___", state 9, "((j>=2))"
+       line 1249, "pan.___", state 20, "((j<2))"
+       line 1249, "pan.___", state 20, "((j>=2))"
+       line 1254, "pan.___", state 33, "((j<2))"
+       line 1254, "pan.___", state 33, "((j>=2))"
+       line 1252, "pan.___", state 43, "((i<1))"
+       line 1252, "pan.___", state 43, "((i>=1))"
+       line 1262, "pan.___", state 54, "((j<2))"
+       line 1262, "pan.___", state 54, "((j>=2))"
+       line 1266, "pan.___", state 67, "((j<2))"
+       line 1266, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1300, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 86.8 seconds
+pan: rate 19690.388 states/second
+pan: avg transition delay 2.7571e-06 usec
+cp .input.spin urcu_free_single_flip.spin.input
+cp .input.spin.trail urcu_free_single_flip.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.spin.input
new file mode 100644 (file)
index 0000000..e7189ef
--- /dev/null
@@ -0,0 +1,1273 @@
+#define SINGLE_FLIP
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
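+/*
+ * Illustrative usage sketch (not part of the verified model): an instruction
+ * whose input is produced under token BIT_A and which produces token BIT_B
+ * (both hypothetical one-hot bits) would be guarded as:
+ *
+ *     :: CONSUME_TOKENS(proc_state, BIT_A, BIT_B) ->
+ *             ... execute the instruction ...
+ *             PRODUCE_TOKENS(proc_state, BIT_B);
+ *
+ * The guard only fires once the inputs are ready (BIT_A set) and the
+ * instruction has not executed yet (BIT_B still clear).
+ */
+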
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually eliminate this dependency, but it may be required when writing
+ * multiple times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a depencency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
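+/*
+ * Illustrative example of the three data dependency kinds (not part of the
+ * model), using two hypothetical variables x and y:
+ *
+ *     RAW:  x = 1;  y = x;    (y reads the value just written to x)
+ *     WAR:  y = x;  x = 1;    (x is written after having been read)
+ *     WAW:  x = 1;  x = 2;    (two successive writes to the same variable)
+ */
+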
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there were a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be manually unrolled. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in its cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
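+/*
+ * Illustrative usage sketch (not part of the model): a cached variable "foo"
+ * (hypothetical name) would be declared and accessed as follows:
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *
+ *     INIT_CACHED_VAR(foo, 0, j);             initialize memory and all caches
+ *     WRITE_CACHED_VAR(foo, 1);               write local cache copy, mark dirty
+ *     tmp = READ_CACHED_VAR(foo);             read the local cache copy
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());     flush the copy to memory if dirty
+ *     CACHE_READ_FROM_MEM(foo, get_pid());    refresh the copy if not dirty
+ */
+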
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops
+                * waiting for the reader and keeps sending barrier requests,
+                * while the reader always services them without continuing
+                * its own execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
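+
+/*
+ * Summary of the two configurations above: with REMOTE_BARRIERS, the reader's
+ * own barriers are modeled as no-ops (smp_mb_reader) and the reader instead
+ * services the writer's barrier requests in smp_mb_recv(), while
+ * smp_mb_send() posts a request to each reader and busy-waits for completion.
+ * Without REMOTE_BARRIERS, reader and writer both execute smp_mb() directly
+ * and smp_mb_recv() is a no-op.
+ */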
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
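+/*
+ * ooo_mem() is invoked between instructions to model out-of-order memory:
+ * at each such point, every shared variable's cache copy may or may not be
+ * flushed to memory and, on Alpha (HAVE_OOO_CACHE_READ), may or may not be
+ * refreshed from memory; on other architectures every copy is refreshed in
+ * order via smp_rmb(), so dependent reads are not reordered.
+ */
+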
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
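+/*
+ * Rough C-level equivalent of the PROCEDURE_READ_LOCK body (illustrative
+ * only; the model above additionally exposes each load and store to
+ * reordering through ooo_mem()):
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;    outermost nesting level
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;        nested read-side c.s.
+ */
+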
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
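+/*
+ * Rough C-level equivalent of the PROCEDURE_READ_UNLOCK body (illustrative
+ * only):
+ *
+ *     tmp = urcu_active_readers[id];
+ *     urcu_active_readers[id] = tmp - 1;
+ */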
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency between loops on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
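+
+/*
+ * At the C level, one urcu_one_read() iteration corresponds roughly to the
+ * following (illustrative only):
+ *
+ *     rcu_read_lock();
+ *     rcu_read_lock();                        nested lock
+ *     ptr  = rcu_dereference(rcu_ptr);
+ *     data = rcu_data[ptr];
+ *     rcu_read_unlock();                      nested unlock
+ *     rcu_read_unlock();
+ *
+ * followed by a second, manually unrolled lock/dereference/unlock sequence.
+ */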
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is actually blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution: only a single writer runs at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop iteration. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check "late-arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with progress here so that, with weak fairness, we can
+        * test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave this after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.spin.input.trail b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_free_single_flip.spin.input.trail
new file mode 100644 (file)
index 0000000..6eb297f
--- /dev/null
@@ -0,0 +1,1062 @@
+-2:3:-2
+-4:-4:-4
+1:0:3997
+2:3:3917
+3:3:3920
+4:3:3920
+5:3:3923
+6:3:3931
+7:3:3931
+8:3:3934
+9:3:3940
+10:3:3944
+11:3:3944
+12:3:3947
+13:3:3957
+14:3:3965
+15:3:3965
+16:3:3968
+17:3:3974
+18:3:3978
+19:3:3978
+20:3:3981
+21:3:3987
+22:3:3991
+23:3:3992
+24:0:3997
+25:3:3994
+26:0:3997
+27:2:2757
+28:0:3997
+29:2:2763
+30:0:3997
+31:2:2764
+32:0:3997
+33:2:2766
+34:0:3997
+35:2:2767
+36:0:3997
+37:2:2768
+38:0:3997
+39:2:2769
+40:0:3997
+41:2:2770
+42:0:3997
+43:2:2771
+44:0:3997
+45:2:2772
+46:2:2773
+47:2:2777
+48:2:2778
+49:2:2786
+50:2:2787
+51:2:2791
+52:2:2792
+53:2:2800
+54:2:2805
+55:2:2809
+56:2:2810
+57:2:2818
+58:2:2819
+59:2:2823
+60:2:2824
+61:2:2818
+62:2:2819
+63:2:2823
+64:2:2824
+65:2:2832
+66:2:2837
+67:2:2844
+68:2:2845
+69:2:2852
+70:2:2857
+71:2:2864
+72:2:2865
+73:2:2864
+74:2:2865
+75:2:2872
+76:2:2882
+77:0:3997
+78:2:2771
+79:0:3997
+80:2:2886
+81:2:2890
+82:2:2891
+83:2:2895
+84:2:2899
+85:2:2900
+86:2:2904
+87:2:2912
+88:2:2913
+89:2:2917
+90:2:2921
+91:2:2922
+92:2:2917
+93:2:2918
+94:2:2926
+95:0:3997
+96:2:2771
+97:0:3997
+98:2:2934
+99:2:2935
+100:2:2936
+101:0:3997
+102:2:2771
+103:0:3997
+104:2:2941
+105:0:3997
+106:2:3554
+107:2:3555
+108:2:3559
+109:2:3563
+110:2:3564
+111:2:3568
+112:2:3573
+113:2:3581
+114:2:3585
+115:2:3586
+116:2:3581
+117:2:3585
+118:2:3586
+119:2:3590
+120:2:3597
+121:2:3604
+122:2:3605
+123:2:3612
+124:2:3617
+125:2:3624
+126:2:3625
+127:2:3624
+128:2:3625
+129:2:3632
+130:2:3636
+131:0:3997
+132:2:2943
+133:2:3535
+134:0:3997
+135:2:2771
+136:0:3997
+137:2:2944
+138:0:3997
+139:2:2771
+140:0:3997
+141:2:2947
+142:2:2948
+143:2:2952
+144:2:2953
+145:2:2961
+146:2:2962
+147:2:2966
+148:2:2967
+149:2:2975
+150:2:2980
+151:2:2984
+152:2:2985
+153:2:2993
+154:2:2994
+155:2:2998
+156:2:2999
+157:2:2993
+158:2:2994
+159:2:2998
+160:2:2999
+161:2:3007
+162:2:3012
+163:2:3019
+164:2:3020
+165:2:3027
+166:2:3032
+167:2:3039
+168:2:3040
+169:2:3039
+170:2:3040
+171:2:3047
+172:2:3056
+173:0:3997
+174:2:2771
+175:0:3997
+176:2:3060
+177:2:3069
+178:2:3070
+179:2:3074
+180:2:3075
+181:2:3079
+182:2:3080
+183:2:3088
+184:2:3093
+185:2:3097
+186:2:3098
+187:2:3106
+188:2:3107
+189:2:3111
+190:2:3112
+191:2:3106
+192:2:3107
+193:2:3111
+194:2:3112
+195:2:3120
+196:2:3127
+197:2:3128
+198:2:3132
+199:2:3133
+200:2:3140
+201:2:3145
+202:2:3152
+203:2:3153
+204:2:3152
+205:2:3153
+206:2:3160
+207:2:3172
+208:2:3173
+209:0:3997
+210:2:2771
+211:0:3997
+212:2:3523
+213:0:3997
+214:1:2
+215:0:3997
+216:1:8
+217:0:3997
+218:1:9
+219:0:3997
+220:1:10
+221:0:3997
+222:1:11
+223:0:3997
+224:1:12
+225:1:13
+226:1:17
+227:1:18
+228:1:26
+229:1:27
+230:1:31
+231:1:32
+232:1:40
+233:1:45
+234:1:49
+235:1:50
+236:1:58
+237:1:59
+238:1:63
+239:1:64
+240:1:58
+241:1:59
+242:1:63
+243:1:64
+244:1:72
+245:1:77
+246:1:84
+247:1:85
+248:1:92
+249:1:97
+250:1:104
+251:1:105
+252:1:104
+253:1:105
+254:1:112
+255:0:3997
+256:1:11
+257:0:3997
+258:1:123
+259:1:124
+260:0:3997
+261:1:11
+262:0:3997
+263:1:130
+264:1:131
+265:1:135
+266:1:136
+267:1:144
+268:1:145
+269:1:149
+270:1:150
+271:1:158
+272:1:163
+273:1:167
+274:1:168
+275:1:176
+276:1:177
+277:1:181
+278:1:182
+279:1:176
+280:1:177
+281:1:181
+282:1:182
+283:1:190
+284:1:195
+285:1:202
+286:1:203
+287:1:210
+288:1:215
+289:1:222
+290:1:223
+291:1:222
+292:1:223
+293:1:230
+294:0:3997
+295:1:11
+296:0:3997
+297:2:3824
+298:2:3832
+299:2:3836
+300:2:3837
+301:2:3841
+302:2:3849
+303:2:3850
+304:2:3854
+305:2:3858
+306:2:3859
+307:2:3854
+308:2:3858
+309:2:3859
+310:2:3863
+311:2:3870
+312:2:3877
+313:2:3878
+314:2:3885
+315:2:3890
+316:2:3897
+317:2:3898
+318:2:3897
+319:2:3898
+320:2:3905
+321:2:3909
+322:0:3997
+323:2:3525
+324:2:3535
+325:0:3997
+326:2:2771
+327:0:3997
+328:2:3526
+329:2:3527
+330:0:3997
+331:2:2771
+332:0:3997
+333:2:3531
+334:0:3997
+335:2:3539
+336:0:3997
+337:2:2764
+338:0:3997
+339:2:2766
+340:0:3997
+341:2:2767
+342:0:3997
+343:2:2768
+344:0:3997
+345:2:2769
+346:0:3997
+347:2:2770
+348:0:3997
+349:2:2771
+350:0:3997
+351:2:2772
+352:2:2773
+353:2:2777
+354:2:2778
+355:2:2786
+356:2:2787
+357:2:2791
+358:2:2792
+359:2:2800
+360:2:2805
+361:2:2809
+362:2:2810
+363:2:2818
+364:2:2819
+365:2:2820
+366:2:2818
+367:2:2819
+368:2:2823
+369:2:2824
+370:2:2832
+371:2:2837
+372:2:2844
+373:2:2845
+374:2:2852
+375:2:2857
+376:2:2864
+377:2:2865
+378:2:2864
+379:2:2865
+380:2:2872
+381:2:2882
+382:0:3997
+383:2:2771
+384:0:3997
+385:2:2886
+386:2:2890
+387:2:2891
+388:2:2895
+389:2:2899
+390:2:2900
+391:2:2904
+392:2:2912
+393:2:2913
+394:2:2917
+395:2:2918
+396:2:2917
+397:2:2921
+398:2:2922
+399:2:2926
+400:0:3997
+401:2:2771
+402:0:3997
+403:2:2934
+404:2:2935
+405:2:2936
+406:0:3997
+407:2:2771
+408:0:3997
+409:2:2941
+410:0:3997
+411:1:241
+412:1:242
+413:1:246
+414:1:247
+415:1:255
+416:1:256
+417:1:260
+418:1:261
+419:1:269
+420:1:274
+421:1:278
+422:1:279
+423:1:287
+424:1:288
+425:1:292
+426:1:293
+427:1:287
+428:1:288
+429:1:292
+430:1:293
+431:1:301
+432:1:306
+433:1:313
+434:1:314
+435:1:321
+436:1:326
+437:1:333
+438:1:334
+439:1:333
+440:1:334
+441:1:341
+442:1:350
+443:0:3997
+444:1:11
+445:0:3997
+446:1:468
+447:1:472
+448:1:473
+449:1:477
+450:1:478
+451:1:486
+452:1:494
+453:1:495
+454:1:499
+455:1:503
+456:1:504
+457:1:499
+458:1:503
+459:1:504
+460:1:508
+461:1:515
+462:1:522
+463:1:523
+464:1:530
+465:1:535
+466:1:542
+467:1:543
+468:1:542
+469:1:543
+470:1:550
+471:0:3997
+472:1:11
+473:0:3997
+474:1:560
+475:1:561
+476:1:565
+477:1:566
+478:1:574
+479:1:575
+480:1:579
+481:1:580
+482:1:588
+483:1:593
+484:1:597
+485:1:598
+486:1:606
+487:1:607
+488:1:611
+489:1:612
+490:1:606
+491:1:607
+492:1:611
+493:1:612
+494:1:620
+495:1:625
+496:1:632
+497:1:633
+498:1:640
+499:1:645
+500:1:652
+501:1:653
+502:1:652
+503:1:653
+504:1:660
+505:0:3997
+506:1:11
+507:0:3997
+508:1:671
+509:1:674
+510:1:675
+511:0:3997
+512:1:11
+513:0:3997
+514:1:678
+515:1:679
+516:1:683
+517:1:684
+518:1:692
+519:1:693
+520:1:697
+521:1:698
+522:1:706
+523:1:711
+524:1:715
+525:1:716
+526:1:724
+527:1:725
+528:1:729
+529:1:730
+530:1:724
+531:1:725
+532:1:729
+533:1:730
+534:1:738
+535:1:743
+536:1:750
+537:1:751
+538:1:758
+539:1:763
+540:1:770
+541:1:771
+542:1:770
+543:1:771
+544:1:778
+545:0:3997
+546:1:11
+547:0:3997
+548:1:902
+549:1:903
+550:1:907
+551:1:908
+552:1:916
+553:1:917
+554:1:921
+555:1:922
+556:1:930
+557:1:935
+558:1:939
+559:1:940
+560:1:948
+561:1:949
+562:1:953
+563:1:954
+564:1:948
+565:1:949
+566:1:953
+567:1:954
+568:1:962
+569:1:967
+570:1:974
+571:1:975
+572:1:982
+573:1:987
+574:1:994
+575:1:995
+576:1:994
+577:1:995
+578:1:1002
+579:1:1011
+580:1:1015
+581:0:3997
+582:1:11
+583:0:3997
+584:1:1016
+585:1:1017
+586:1:1021
+587:1:1022
+588:1:1030
+589:1:1031
+590:1:1032
+591:1:1044
+592:1:1049
+593:1:1053
+594:1:1054
+595:1:1062
+596:1:1063
+597:1:1067
+598:1:1068
+599:1:1062
+600:1:1063
+601:1:1067
+602:1:1068
+603:1:1076
+604:1:1081
+605:1:1088
+606:1:1089
+607:1:1096
+608:1:1101
+609:1:1108
+610:1:1109
+611:1:1108
+612:1:1109
+613:1:1116
+614:0:3997
+615:1:11
+616:0:3997
+617:2:3554
+618:2:3555
+619:2:3559
+620:2:3563
+621:2:3564
+622:2:3568
+623:2:3573
+624:2:3581
+625:2:3585
+626:2:3586
+627:2:3581
+628:2:3585
+629:2:3586
+630:2:3590
+631:2:3597
+632:2:3604
+633:2:3605
+634:2:3612
+635:2:3617
+636:2:3624
+637:2:3625
+638:2:3624
+639:2:3625
+640:2:3632
+641:2:3636
+642:0:3997
+643:2:2943
+644:2:3535
+645:0:3997
+646:2:2771
+647:0:3997
+648:2:2944
+649:0:3997
+650:2:2771
+651:0:3997
+652:2:2947
+653:2:2948
+654:2:2952
+655:2:2953
+656:2:2961
+657:2:2962
+658:2:2966
+659:2:2967
+660:2:2975
+661:2:2980
+662:2:2984
+663:2:2985
+664:2:2993
+665:2:2994
+666:2:2998
+667:2:2999
+668:2:2993
+669:2:2994
+670:2:2998
+671:2:2999
+672:2:3007
+673:2:3012
+674:2:3019
+675:2:3020
+676:2:3027
+677:2:3032
+678:2:3039
+679:2:3040
+680:2:3039
+681:2:3040
+682:2:3047
+683:2:3056
+684:0:3997
+685:2:2771
+686:0:3997
+687:2:3060
+688:2:3061
+689:2:3062
+690:2:3074
+691:2:3075
+692:2:3079
+693:2:3080
+694:2:3088
+695:2:3093
+696:2:3097
+697:2:3098
+698:2:3106
+699:2:3107
+700:2:3111
+701:2:3112
+702:2:3106
+703:2:3107
+704:2:3111
+705:2:3112
+706:2:3120
+707:2:3125
+708:2:3132
+709:2:3133
+710:2:3140
+711:2:3145
+712:2:3152
+713:2:3153
+714:2:3152
+715:2:3153
+716:2:3160
+717:2:3172
+718:2:3173
+719:0:3997
+720:2:2771
+721:0:3997
+722:2:3523
+723:0:3997
+724:2:3827
+725:2:3828
+726:2:3832
+727:2:3836
+728:2:3837
+729:2:3841
+730:2:3849
+731:2:3850
+732:2:3854
+733:2:3858
+734:2:3859
+735:2:3854
+736:2:3858
+737:2:3859
+738:2:3863
+739:2:3870
+740:2:3877
+741:2:3878
+742:2:3885
+743:2:3890
+744:2:3897
+745:2:3898
+746:2:3897
+747:2:3898
+748:2:3905
+749:2:3909
+750:0:3997
+751:2:3525
+752:2:3535
+753:0:3997
+754:2:2771
+755:0:3997
+756:2:3526
+757:2:3527
+758:0:3997
+759:2:2771
+760:0:3997
+761:2:3531
+762:0:3997
+763:2:3539
+764:0:3997
+765:2:2764
+766:0:3997
+767:2:2766
+768:0:3997
+769:2:2767
+770:0:3997
+771:2:2768
+772:0:3997
+773:2:2769
+774:0:3997
+775:2:2770
+776:0:3997
+777:2:2771
+778:0:3997
+779:2:2772
+780:2:2773
+781:2:2777
+782:2:2778
+783:2:2786
+784:2:2787
+785:2:2791
+786:2:2792
+787:2:2800
+788:2:2805
+789:2:2809
+790:2:2810
+791:2:2818
+792:2:2819
+793:2:2823
+794:2:2824
+795:2:2818
+796:2:2819
+797:2:2820
+798:2:2832
+799:2:2837
+800:2:2844
+801:2:2845
+802:2:2852
+803:2:2857
+804:2:2864
+805:2:2865
+806:2:2864
+807:2:2865
+808:2:2872
+809:2:2882
+810:0:3997
+811:2:2771
+812:0:3997
+813:1:1127
+814:0:3997
+815:1:2663
+816:1:2670
+817:1:2671
+818:1:2678
+819:1:2683
+820:1:2690
+821:1:2691
+822:1:2690
+823:1:2691
+824:1:2698
+825:1:2702
+826:0:3997
+827:2:2886
+828:2:2890
+829:2:2891
+830:2:2895
+831:2:2899
+832:2:2900
+833:2:2904
+834:2:2912
+835:2:2913
+836:2:2917
+837:2:2921
+838:2:2922
+839:2:2917
+840:2:2918
+841:2:2926
+842:0:3997
+843:2:2771
+844:0:3997
+845:2:2934
+846:2:2935
+847:2:2936
+848:0:3997
+849:2:2771
+850:0:3997
+851:2:2941
+852:0:3997
+853:2:3554
+854:2:3555
+855:2:3559
+856:2:3563
+857:2:3564
+858:2:3568
+859:2:3573
+860:2:3581
+861:2:3585
+862:2:3586
+863:2:3581
+864:2:3585
+865:2:3586
+866:2:3590
+867:2:3597
+868:2:3604
+869:2:3605
+870:2:3612
+871:2:3617
+872:2:3624
+873:2:3625
+874:2:3624
+875:2:3625
+876:2:3632
+877:2:3636
+878:0:3997
+879:2:2943
+880:2:3535
+881:0:3997
+882:2:2771
+883:0:3997
+884:2:2944
+885:0:3997
+886:2:2771
+887:0:3997
+888:2:2947
+889:2:2948
+890:2:2952
+891:2:2953
+892:2:2961
+893:2:2962
+894:2:2966
+895:2:2967
+896:2:2975
+897:2:2980
+898:2:2984
+899:2:2985
+900:2:2993
+901:2:2994
+902:2:2998
+903:2:2999
+904:2:2993
+905:2:2994
+906:2:2998
+907:2:2999
+908:2:3007
+909:2:3012
+910:2:3019
+911:2:3020
+912:2:3027
+913:2:3032
+914:2:3039
+915:2:3040
+916:2:3039
+917:2:3040
+918:2:3047
+919:2:3056
+920:0:3997
+921:2:2771
+922:0:3997
+923:2:3060
+924:2:3061
+925:2:3062
+926:2:3074
+927:2:3075
+928:2:3079
+929:2:3080
+930:2:3088
+931:2:3093
+932:2:3097
+933:2:3098
+934:2:3106
+935:2:3107
+936:2:3111
+937:2:3112
+938:2:3106
+939:2:3107
+940:2:3111
+941:2:3112
+942:2:3120
+943:2:3125
+944:2:3132
+945:2:3133
+946:2:3140
+947:2:3145
+948:2:3152
+949:2:3153
+950:2:3152
+951:2:3153
+952:2:3160
+953:2:3170
+954:0:3997
+955:2:2771
+956:0:3997
+957:2:3176
+958:0:3997
+959:2:3645
+960:2:3646
+961:2:3650
+962:2:3654
+963:2:3655
+964:2:3659
+965:2:3667
+966:2:3668
+967:2:3672
+968:2:3676
+969:2:3677
+970:2:3672
+971:2:3676
+972:2:3677
+973:2:3681
+974:2:3688
+975:2:3695
+976:2:3696
+977:2:3703
+978:2:3708
+979:2:3715
+980:2:3716
+981:2:3715
+982:2:3716
+983:2:3723
+984:2:3727
+985:0:3997
+986:2:3178
+987:2:3179
+988:0:3997
+989:2:2771
+990:0:3997
+991:2:3060
+992:2:3061
+993:2:3065
+994:2:3066
+995:2:3074
+996:2:3075
+997:2:3079
+998:2:3080
+999:2:3088
+1000:2:3093
+1001:2:3097
+1002:2:3098
+1003:2:3106
+1004:2:3107
+1005:2:3111
+1006:2:3112
+1007:2:3106
+1008:2:3107
+1009:2:3111
+1010:2:3112
+1011:2:3120
+1012:2:3125
+1013:2:3132
+1014:2:3133
+1015:2:3140
+1016:2:3145
+1017:2:3152
+1018:2:3153
+1019:2:3152
+1020:2:3153
+1021:2:3160
+1022:2:3170
+1023:0:3997
+1024:2:2771
+1025:0:3997
+1026:2:3176
+1027:0:3997
+1028:2:3645
+1029:2:3646
+1030:2:3650
+1031:2:3654
+1032:2:3655
+1033:2:3659
+1034:2:3667
+1035:2:3668
+1036:2:3672
+1037:2:3676
+1038:2:3677
+1039:2:3672
+1040:2:3676
+1041:2:3677
+1042:2:3681
+1043:2:3688
+1044:2:3695
+1045:2:3696
+1046:2:3703
+1047:2:3708
+1048:2:3715
+1049:2:3716
+1050:2:3715
+1051:2:3716
+1052:2:3723
+1053:2:3727
+1054:0:3997
+1055:1:1129
+1056:1:1130
+1057:0:3995
+1058:1:11
+1059:0:4001
+1060:1:1715
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress.ltl b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.log
new file mode 100644 (file)
index 0000000..c286056
--- /dev/null
@@ -0,0 +1,462 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_reader.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+depth 23: Claim reached state 9 (line 1300)
+depth 1053: Claim reached state 9 (line 1299)
+Depth=    6090 States=    1e+06 Transitions= 2.22e+07 Memory=   510.295        t=   64.5 R=   2e+04
+Depth=    6090 States=    2e+06 Transitions= 5.32e+07 Memory=   552.776        t=    157 R=   1e+04
+Depth=    6090 States=    3e+06 Transitions= 8.56e+07 Memory=   597.014        t=    253 R=   1e+04
+pan: resizing hashtable to -w22..  done
+Depth=    6090 States=    4e+06 Transitions= 1.18e+08 Memory=   672.764        t=    351 R=   1e+04
+Depth=    6090 States=    5e+06 Transitions= 1.49e+08 Memory=   716.416        t=    443 R=   1e+04
+Depth=    6090 States=    6e+06 Transitions= 1.79e+08 Memory=   760.459        t=    531 R=   1e+04
+Depth=    6090 States=    7e+06 Transitions= 2.11e+08 Memory=   804.990        t=    627 R=   1e+04
+Depth=    6090 States=    8e+06 Transitions= 2.44e+08 Memory=   848.057        t=    727 R=   1e+04
+Depth=    6090 States=    9e+06 Transitions= 2.77e+08 Memory=   891.807        t=    827 R=   1e+04
+pan: resizing hashtable to -w24..  done
+Depth=    6090 States=    1e+07 Transitions= 3.11e+08 Memory=  1062.385        t=    928 R=   1e+04
+Depth=    6090 States=  1.1e+07 Transitions= 3.41e+08 Memory=  1105.744        t= 1.02e+03 R=   1e+04
+Depth=    6090 States=  1.2e+07 Transitions=  3.7e+08 Memory=  1151.252        t= 1.1e+03 R=   1e+04
+Depth=    6090 States=  1.3e+07 Transitions= 4.06e+08 Memory=  1193.635        t= 1.21e+03 R=   1e+04
+Depth=    6090 States=  1.4e+07 Transitions=  4.4e+08 Memory=  1236.506        t= 1.31e+03 R=   1e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6090, errors: 0
+  7774591 states, stored (1.48046e+07 visited)
+4.51403e+08 states, matched
+4.6620756e+08 transitions (= visited+matched)
+7.4729019e+09 atomic steps
+hash conflicts: 1.6384912e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  860.074      equivalent memory usage for states (stored*(State-vector + overhead))
+  689.043      actual memory usage for states (compression: 80.11%)
+               state-vector as stored = 65 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1274.299      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 81, "(1)"
+       line 257, "pan.___", state 101, "(1)"
+       line 261, "pan.___", state 109, "(1)"
+       line 597, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 199, "(1)"
+       line 257, "pan.___", state 219, "(1)"
+       line 261, "pan.___", state 227, "(1)"
+       line 411, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 310, "(1)"
+       line 257, "pan.___", state 330, "(1)"
+       line 261, "pan.___", state 338, "(1)"
+       line 411, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 361, "(1)"
+       line 411, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 362, "else"
+       line 411, "pan.___", state 365, "(1)"
+       line 415, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 375, "(1)"
+       line 415, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 376, "else"
+       line 415, "pan.___", state 379, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 413, "pan.___", state 385, "((i<1))"
+       line 413, "pan.___", state 385, "((i>=1))"
+       line 420, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 393, "(1)"
+       line 420, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 394, "else"
+       line 420, "pan.___", state 397, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 424, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 407, "(1)"
+       line 424, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 408, "else"
+       line 424, "pan.___", state 411, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 422, "pan.___", state 417, "((i<2))"
+       line 422, "pan.___", state 417, "((i>=2))"
+       line 249, "pan.___", state 423, "(1)"
+       line 253, "pan.___", state 431, "(1)"
+       line 253, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 432, "else"
+       line 251, "pan.___", state 437, "((i<1))"
+       line 251, "pan.___", state 437, "((i>=1))"
+       line 257, "pan.___", state 443, "(1)"
+       line 257, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 444, "else"
+       line 261, "pan.___", state 451, "(1)"
+       line 261, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 452, "else"
+       line 259, "pan.___", state 457, "((i<2))"
+       line 259, "pan.___", state 457, "((i>=2))"
+       line 266, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 461, "else"
+       line 431, "pan.___", state 463, "(1)"
+       line 431, "pan.___", state 463, "(1)"
+       line 597, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 468, "(1)"
+       line 272, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 483, "(1)"
+       line 280, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 519, "(1)"
+       line 253, "pan.___", state 527, "(1)"
+       line 257, "pan.___", state 539, "(1)"
+       line 261, "pan.___", state 547, "(1)"
+       line 411, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 629, "(1)"
+       line 253, "pan.___", state 637, "(1)"
+       line 257, "pan.___", state 649, "(1)"
+       line 261, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 747, "(1)"
+       line 257, "pan.___", state 767, "(1)"
+       line 261, "pan.___", state 775, "(1)"
+       line 411, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 796, "(1)"
+       line 411, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 797, "else"
+       line 411, "pan.___", state 800, "(1)"
+       line 415, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 810, "(1)"
+       line 415, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 811, "else"
+       line 415, "pan.___", state 814, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 413, "pan.___", state 820, "((i<1))"
+       line 413, "pan.___", state 820, "((i>=1))"
+       line 420, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 828, "(1)"
+       line 420, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 829, "else"
+       line 420, "pan.___", state 832, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 424, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 842, "(1)"
+       line 424, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 843, "else"
+       line 424, "pan.___", state 846, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 422, "pan.___", state 852, "((i<2))"
+       line 422, "pan.___", state 852, "((i>=2))"
+       line 249, "pan.___", state 858, "(1)"
+       line 253, "pan.___", state 866, "(1)"
+       line 253, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 867, "else"
+       line 251, "pan.___", state 872, "((i<1))"
+       line 251, "pan.___", state 872, "((i>=1))"
+       line 257, "pan.___", state 878, "(1)"
+       line 257, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 879, "else"
+       line 261, "pan.___", state 886, "(1)"
+       line 261, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 887, "else"
+       line 259, "pan.___", state 892, "((i<2))"
+       line 259, "pan.___", state 892, "((i>=2))"
+       line 266, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 896, "else"
+       line 431, "pan.___", state 898, "(1)"
+       line 431, "pan.___", state 898, "(1)"
+       line 605, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 971, "(1)"
+       line 253, "pan.___", state 979, "(1)"
+       line 257, "pan.___", state 991, "(1)"
+       line 261, "pan.___", state 999, "(1)"
+       line 411, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1085, "(1)"
+       line 257, "pan.___", state 1105, "(1)"
+       line 261, "pan.___", state 1113, "(1)"
+       line 411, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1200, "(1)"
+       line 257, "pan.___", state 1220, "(1)"
+       line 261, "pan.___", state 1228, "(1)"
+       line 411, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1311, "(1)"
+       line 257, "pan.___", state 1331, "(1)"
+       line 261, "pan.___", state 1339, "(1)"
+       line 272, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1407, "(1)"
+       line 253, "pan.___", state 1415, "(1)"
+       line 257, "pan.___", state 1427, "(1)"
+       line 261, "pan.___", state 1435, "(1)"
+       line 411, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1517, "(1)"
+       line 253, "pan.___", state 1525, "(1)"
+       line 257, "pan.___", state 1537, "(1)"
+       line 261, "pan.___", state 1545, "(1)"
+       line 411, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1628, "(1)"
+       line 253, "pan.___", state 1636, "(1)"
+       line 257, "pan.___", state 1648, "(1)"
+       line 261, "pan.___", state 1656, "(1)"
+       line 411, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1742, "(1)"
+       line 257, "pan.___", state 1762, "(1)"
+       line 261, "pan.___", state 1770, "(1)"
+       line 644, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1860, "(1)"
+       line 257, "pan.___", state 1880, "(1)"
+       line 261, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1971, "(1)"
+       line 257, "pan.___", state 1991, "(1)"
+       line 261, "pan.___", state 1999, "(1)"
+       line 411, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2022, "(1)"
+       line 411, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2023, "else"
+       line 411, "pan.___", state 2026, "(1)"
+       line 415, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2036, "(1)"
+       line 415, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2037, "else"
+       line 415, "pan.___", state 2040, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 413, "pan.___", state 2046, "((i<1))"
+       line 413, "pan.___", state 2046, "((i>=1))"
+       line 420, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2054, "(1)"
+       line 420, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2055, "else"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 424, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2068, "(1)"
+       line 424, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2069, "else"
+       line 424, "pan.___", state 2072, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 422, "pan.___", state 2078, "((i<2))"
+       line 422, "pan.___", state 2078, "((i>=2))"
+       line 249, "pan.___", state 2084, "(1)"
+       line 253, "pan.___", state 2092, "(1)"
+       line 253, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2093, "else"
+       line 251, "pan.___", state 2098, "((i<1))"
+       line 251, "pan.___", state 2098, "((i>=1))"
+       line 257, "pan.___", state 2104, "(1)"
+       line 257, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2105, "else"
+       line 261, "pan.___", state 2112, "(1)"
+       line 261, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2113, "else"
+       line 259, "pan.___", state 2118, "((i<2))"
+       line 259, "pan.___", state 2118, "((i>=2))"
+       line 266, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2122, "else"
+       line 431, "pan.___", state 2124, "(1)"
+       line 431, "pan.___", state 2124, "(1)"
+       line 644, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2129, "(1)"
+       line 272, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2180, "(1)"
+       line 253, "pan.___", state 2188, "(1)"
+       line 257, "pan.___", state 2200, "(1)"
+       line 261, "pan.___", state 2208, "(1)"
+       line 411, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2290, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2310, "(1)"
+       line 261, "pan.___", state 2318, "(1)"
+       line 272, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2387, "(1)"
+       line 253, "pan.___", state 2395, "(1)"
+       line 257, "pan.___", state 2407, "(1)"
+       line 261, "pan.___", state 2415, "(1)"
+       line 411, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2497, "(1)"
+       line 253, "pan.___", state 2505, "(1)"
+       line 257, "pan.___", state 2517, "(1)"
+       line 261, "pan.___", state 2525, "(1)"
+       line 411, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2608, "(1)"
+       line 253, "pan.___", state 2616, "(1)"
+       line 257, "pan.___", state 2628, "(1)"
+       line 261, "pan.___", state 2636, "(1)"
+       line 249, "pan.___", state 2667, "(1)"
+       line 257, "pan.___", state 2687, "(1)"
+       line 261, "pan.___", state 2695, "(1)"
+       line 249, "pan.___", state 2710, "(1)"
+       line 253, "pan.___", state 2718, "(1)"
+       line 257, "pan.___", state 2730, "(1)"
+       line 261, "pan.___", state 2738, "(1)"
+       line 898, "pan.___", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 82, "(1)"
+       line 253, "pan.___", state 90, "(1)"
+       line 257, "pan.___", state 102, "(1)"
+       line 272, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 257, "(1)"
+       line 253, "pan.___", state 265, "(1)"
+       line 257, "pan.___", state 277, "(1)"
+       line 261, "pan.___", state 285, "(1)"
+       line 415, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 378, "(1)"
+       line 257, "pan.___", state 390, "(1)"
+       line 261, "pan.___", state 398, "(1)"
+       line 415, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 499, "(1)"
+       line 257, "pan.___", state 511, "(1)"
+       line 261, "pan.___", state 519, "(1)"
+       line 415, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 610, "(1)"
+       line 257, "pan.___", state 622, "(1)"
+       line 261, "pan.___", state 630, "(1)"
+       line 415, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 723, "(1)"
+       line 257, "pan.___", state 735, "(1)"
+       line 261, "pan.___", state 743, "(1)"
+       line 272, "pan.___", state 791, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 800, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 813, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 838, "(1)"
+       line 253, "pan.___", state 846, "(1)"
+       line 257, "pan.___", state 858, "(1)"
+       line 261, "pan.___", state 866, "(1)"
+       line 272, "pan.___", state 886, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 895, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 910, "(1)"
+       line 284, "pan.___", state 917, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 933, "(1)"
+       line 253, "pan.___", state 941, "(1)"
+       line 257, "pan.___", state 953, "(1)"
+       line 261, "pan.___", state 961, "(1)"
+       line 276, "pan.___", state 986, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 999, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1008, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1024, "(1)"
+       line 253, "pan.___", state 1032, "(1)"
+       line 257, "pan.___", state 1044, "(1)"
+       line 261, "pan.___", state 1052, "(1)"
+       line 276, "pan.___", state 1077, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1090, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1099, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1115, "(1)"
+       line 253, "pan.___", state 1123, "(1)"
+       line 257, "pan.___", state 1135, "(1)"
+       line 261, "pan.___", state 1143, "(1)"
+       line 276, "pan.___", state 1168, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1181, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1190, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1206, "(1)"
+       line 253, "pan.___", state 1214, "(1)"
+       line 257, "pan.___", state 1226, "(1)"
+       line 261, "pan.___", state 1234, "(1)"
+       line 1237, "pan.___", state 1249, "-end-"
+       (78 of 1249 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1302, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 1.39e+03 seconds
+pan: rate 10656.973 states/second
+pan: avg transition delay 2.9798e-06 usec
+cp .input.spin urcu_progress_reader.spin.input
+cp .input.spin.trail urcu_progress_reader.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_reader.spin.input
new file mode 100644 (file)
index 0000000..7cbeb11
--- /dev/null
@@ -0,0 +1,1273 @@
+#define READER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
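+/*
+ * Layout note (derived from the two defines above): the low 7 bits of a
+ * reader's counter hold the nesting depth, while bit 7 holds the grace
+ * period parity. For example, a counter value of 0x83 means parity bit set
+ * with nesting depth 3; 0x02 means parity bit clear with nesting depth 2.
+ */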
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
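+
+/*
+ * Illustration only (not part of the model): a hypothetical two-statement
+ * flow showing how these macros express a RAW dependency. Statement B may
+ * only fire once statement A has produced its token, and each statement
+ * fires at most once because its own token is passed as "notbits". The
+ * names flow, TOK_A, TOK_B, x and y are made up for this sketch.
+ *
+ *     #define TOK_A   (1 << 0)
+ *     #define TOK_B   (1 << 1)
+ *
+ *     do
+ *     :: CONSUME_TOKENS(flow, 0, TOK_A) ->          (A: needs no input token)
+ *             x = 1;
+ *             PRODUCE_TOKENS(flow, TOK_A);
+ *     :: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->      (B: RAW dependency on A)
+ *             y = x;
+ *             PRODUCE_TOKENS(flow, TOK_B);
+ *     :: CONSUME_TOKENS(flow, TOK_A | TOK_B, 0) ->
+ *             CLEAR_TOKENS(flow, TOK_A | TOK_B);
+ *             break;
+ *     od;
+ */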
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it can still arise when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
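+
+/*
+ * Small illustration (not taken from the model) of the data dependency types
+ * above, using plain assignments to a single variable 'a':
+ *
+ *     a = x;       (1)
+ *     y = a;       (2)  RAW: (2) reads the value written by (1)
+ *     a = z;       (3)  WAR: (3) overwrites the value that (2) reads
+ *     a = w;       (4)  WAW: (4) and (3) write to the same variable
+ */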
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. Cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively); smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
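+
+/*
+ * Usage sketch (hypothetical, not part of this model): a WRITE_CACHED_VAR
+ * only updates the writing process' local copy and marks it dirty; main
+ * memory and other processes' copies change only when the flush/refresh
+ * macros run. The name other_id below is made up for this sketch.
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *
+ *     INIT_CACHED_VAR(foo, 0, j);              all copies and mem_foo are 0
+ *     WRITE_CACHED_VAR(foo, 1);                only cached_foo.val[_pid] is 1
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());      mem_foo becomes 1
+ *     CACHE_READ_FROM_MEM(foo, other_id);      a clean copy now reads 1
+ */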
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add non-existent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
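+
+/*
+ * For reference, a rough C-like sketch (an assumption for illustration, not
+ * copied from the urcu sources) of the read-lock logic the token graph above
+ * models:
+ *
+ *     tmp = urcu_active_readers[id];
+ *     if (!(tmp & RCU_GP_CTR_NEST_MASK))
+ *             urcu_active_readers[id] = urcu_gp_ctr;     (outermost lock)
+ *     else
+ *             urcu_active_readers[id] = tmp + 1;         (nested lock)
+ */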
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
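+
+/*
+ * Corresponding read-unlock sketch (same caveat as above): read the
+ * per-reader counter and write it back decremented by one.
+ *
+ *     tmp = urcu_active_readers[id];
+ *     urcu_active_readers[id] = tmp - 1;
+ */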
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is making
+                * progress when it's blocked by an always-progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
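+
+/*
+ * Rough C-like sketch (an assumption for illustration, not copied from the
+ * urcu sources) of the update path the writer tokens above model: publish
+ * the new data, run the two-phase grace period, then reclaim the old slot.
+ * flip_parity(), wait_for_readers(), new_slot and old_slot are made-up names
+ * standing in for the READ_GP/WRITE_GP and WAIT token groups.
+ *
+ *     rcu_data[new_slot] = WINE;
+ *     smp_wmb();
+ *     old_slot = rcu_xchg_pointer(&rcu_ptr, new_slot);
+ *     smp_mb();
+ *     flip_parity();  wait_for_readers();        (first flip)
+ *     flip_parity();  wait_for_readers();        (second flip)
+ *     smp_mb();
+ *     rcu_data[old_slot] = POISON;               (free old slot)
+ */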
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read done as prefetching. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+        * Note: the Promela model adds implicit serialization of the
+        * WRITE_FREE instruction. Normally, it would be permitted to
+        * spill over into the next loop iteration. Given that the
+        * validation checks whether the data entry read is poisoned,
+        * it is OK not to check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops forever, let the writer also busy-loop
+        * with a progress label here so that, under weak fairness, we can test
+        * the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..065cb84
--- /dev/null
@@ -0,0 +1,455 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1295)
+depth 23: Claim reached state 9 (line 1300)
+depth 1404: Claim reached state 9 (line 1299)
+Depth=    4122 States=    1e+06 Transitions= 2.18e+07 Memory=   494.377        t=   63.1 R=   2e+04
+Depth=    4237 States=    2e+06 Transitions= 4.19e+07 Memory=   522.209        t=    122 R=   2e+04
+Depth=    4557 States=    3e+06 Transitions= 6.67e+07 Memory=   550.139        t=    195 R=   2e+04
+pan: resizing hashtable to -w22..  done
+Depth=    4762 States=    4e+06 Transitions= 9.06e+07 Memory=   609.190        t=    264 R=   2e+04
+Depth=    4793 States=    5e+06 Transitions= 1.25e+08 Memory=   666.709        t=    366 R=   1e+04
+Depth=    4793 States=    6e+06 Transitions= 1.62e+08 Memory=   713.877        t=    473 R=   1e+04
+Depth=    4980 States=    7e+06 Transitions= 1.97e+08 Memory=   760.752        t=    578 R=   1e+04
+Depth=    4980 States=    8e+06 Transitions= 2.29e+08 Memory=   809.190        t=    674 R=   1e+04
+Depth=    4980 States=    9e+06 Transitions= 2.66e+08 Memory=   862.803        t=    784 R=   1e+04
+pan: resizing hashtable to -w24..  done
+Depth=    4980 States=    1e+07 Transitions= 2.98e+08 Memory=  1029.377        t=    880 R=   1e+04
+Depth=    4980 States=  1.1e+07 Transitions= 3.34e+08 Memory=  1080.061        t=    987 R=   1e+04
+Depth=    4980 States=  1.2e+07 Transitions= 3.69e+08 Memory=  1129.279        t= 1.09e+03 R=   1e+04
+Depth=    4980 States=  1.3e+07 Transitions= 4.01e+08 Memory=  1177.912        t= 1.18e+03 R=   1e+04
+Depth=    4980 States=  1.4e+07 Transitions=  4.4e+08 Memory=  1226.447        t= 1.3e+03 R=   1e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 4980, errors: 0
+  7808338 states, stored (1.48869e+07 visited)
+4.5734827e+08 states, matched
+4.7223515e+08 transitions (= visited+matched)
+7.5648218e+09 atomic steps
+hash conflicts: 1.2909926e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+  863.807      equivalent memory usage for states (stored*(State-vector + overhead))
+  691.866      actual memory usage for states (compression: 80.09%)
+               state-vector as stored = 65 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1277.131      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 411, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 81, "(1)"
+       line 257, "pan.___", state 101, "(1)"
+       line 261, "pan.___", state 109, "(1)"
+       line 597, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 411, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 199, "(1)"
+       line 257, "pan.___", state 219, "(1)"
+       line 261, "pan.___", state 227, "(1)"
+       line 411, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 310, "(1)"
+       line 257, "pan.___", state 330, "(1)"
+       line 261, "pan.___", state 338, "(1)"
+       line 411, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 361, "(1)"
+       line 411, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 362, "else"
+       line 411, "pan.___", state 365, "(1)"
+       line 415, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 375, "(1)"
+       line 415, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 376, "else"
+       line 415, "pan.___", state 379, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 415, "pan.___", state 380, "(1)"
+       line 413, "pan.___", state 385, "((i<1))"
+       line 413, "pan.___", state 385, "((i>=1))"
+       line 420, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 393, "(1)"
+       line 420, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 394, "else"
+       line 420, "pan.___", state 397, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 420, "pan.___", state 398, "(1)"
+       line 424, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 407, "(1)"
+       line 424, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 408, "else"
+       line 424, "pan.___", state 411, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 424, "pan.___", state 412, "(1)"
+       line 422, "pan.___", state 417, "((i<2))"
+       line 422, "pan.___", state 417, "((i>=2))"
+       line 249, "pan.___", state 423, "(1)"
+       line 253, "pan.___", state 431, "(1)"
+       line 253, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 432, "else"
+       line 251, "pan.___", state 437, "((i<1))"
+       line 251, "pan.___", state 437, "((i>=1))"
+       line 257, "pan.___", state 443, "(1)"
+       line 257, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 444, "else"
+       line 261, "pan.___", state 451, "(1)"
+       line 261, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 452, "else"
+       line 259, "pan.___", state 457, "((i<2))"
+       line 259, "pan.___", state 457, "((i>=2))"
+       line 266, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 461, "else"
+       line 431, "pan.___", state 463, "(1)"
+       line 431, "pan.___", state 463, "(1)"
+       line 597, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 597, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 597, "pan.___", state 468, "(1)"
+       line 272, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 483, "(1)"
+       line 280, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 519, "(1)"
+       line 253, "pan.___", state 527, "(1)"
+       line 257, "pan.___", state 539, "(1)"
+       line 261, "pan.___", state 547, "(1)"
+       line 411, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 629, "(1)"
+       line 253, "pan.___", state 637, "(1)"
+       line 257, "pan.___", state 649, "(1)"
+       line 261, "pan.___", state 657, "(1)"
+       line 411, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 747, "(1)"
+       line 257, "pan.___", state 767, "(1)"
+       line 261, "pan.___", state 775, "(1)"
+       line 411, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 796, "(1)"
+       line 411, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 797, "else"
+       line 411, "pan.___", state 800, "(1)"
+       line 415, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 810, "(1)"
+       line 415, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 811, "else"
+       line 415, "pan.___", state 814, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 415, "pan.___", state 815, "(1)"
+       line 413, "pan.___", state 820, "((i<1))"
+       line 413, "pan.___", state 820, "((i>=1))"
+       line 420, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 828, "(1)"
+       line 420, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 829, "else"
+       line 420, "pan.___", state 832, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 420, "pan.___", state 833, "(1)"
+       line 424, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 842, "(1)"
+       line 424, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 843, "else"
+       line 424, "pan.___", state 846, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 424, "pan.___", state 847, "(1)"
+       line 422, "pan.___", state 852, "((i<2))"
+       line 422, "pan.___", state 852, "((i>=2))"
+       line 249, "pan.___", state 858, "(1)"
+       line 253, "pan.___", state 866, "(1)"
+       line 253, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 867, "else"
+       line 251, "pan.___", state 872, "((i<1))"
+       line 251, "pan.___", state 872, "((i>=1))"
+       line 257, "pan.___", state 878, "(1)"
+       line 257, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 879, "else"
+       line 261, "pan.___", state 886, "(1)"
+       line 261, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 887, "else"
+       line 259, "pan.___", state 892, "((i<2))"
+       line 259, "pan.___", state 892, "((i>=2))"
+       line 266, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 896, "else"
+       line 431, "pan.___", state 898, "(1)"
+       line 431, "pan.___", state 898, "(1)"
+       line 605, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 411, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 971, "(1)"
+       line 253, "pan.___", state 979, "(1)"
+       line 257, "pan.___", state 991, "(1)"
+       line 261, "pan.___", state 999, "(1)"
+       line 411, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1085, "(1)"
+       line 257, "pan.___", state 1105, "(1)"
+       line 261, "pan.___", state 1113, "(1)"
+       line 411, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1200, "(1)"
+       line 257, "pan.___", state 1220, "(1)"
+       line 261, "pan.___", state 1228, "(1)"
+       line 411, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1311, "(1)"
+       line 257, "pan.___", state 1331, "(1)"
+       line 261, "pan.___", state 1339, "(1)"
+       line 272, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1407, "(1)"
+       line 253, "pan.___", state 1415, "(1)"
+       line 257, "pan.___", state 1427, "(1)"
+       line 261, "pan.___", state 1435, "(1)"
+       line 411, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1517, "(1)"
+       line 253, "pan.___", state 1525, "(1)"
+       line 257, "pan.___", state 1537, "(1)"
+       line 261, "pan.___", state 1545, "(1)"
+       line 411, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1628, "(1)"
+       line 253, "pan.___", state 1636, "(1)"
+       line 257, "pan.___", state 1648, "(1)"
+       line 261, "pan.___", state 1656, "(1)"
+       line 411, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1742, "(1)"
+       line 257, "pan.___", state 1762, "(1)"
+       line 261, "pan.___", state 1770, "(1)"
+       line 644, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 411, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1860, "(1)"
+       line 257, "pan.___", state 1880, "(1)"
+       line 261, "pan.___", state 1888, "(1)"
+       line 411, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1971, "(1)"
+       line 257, "pan.___", state 1991, "(1)"
+       line 261, "pan.___", state 1999, "(1)"
+       line 411, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2022, "(1)"
+       line 411, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 2023, "else"
+       line 411, "pan.___", state 2026, "(1)"
+       line 415, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2036, "(1)"
+       line 415, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2037, "else"
+       line 415, "pan.___", state 2040, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 415, "pan.___", state 2041, "(1)"
+       line 413, "pan.___", state 2046, "((i<1))"
+       line 413, "pan.___", state 2046, "((i>=1))"
+       line 420, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2054, "(1)"
+       line 420, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 420, "pan.___", state 2055, "else"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 420, "pan.___", state 2059, "(1)"
+       line 424, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2068, "(1)"
+       line 424, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 424, "pan.___", state 2069, "else"
+       line 424, "pan.___", state 2072, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 424, "pan.___", state 2073, "(1)"
+       line 422, "pan.___", state 2078, "((i<2))"
+       line 422, "pan.___", state 2078, "((i>=2))"
+       line 249, "pan.___", state 2084, "(1)"
+       line 253, "pan.___", state 2092, "(1)"
+       line 253, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 253, "pan.___", state 2093, "else"
+       line 251, "pan.___", state 2098, "((i<1))"
+       line 251, "pan.___", state 2098, "((i>=1))"
+       line 257, "pan.___", state 2104, "(1)"
+       line 257, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2105, "else"
+       line 261, "pan.___", state 2112, "(1)"
+       line 261, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 261, "pan.___", state 2113, "else"
+       line 259, "pan.___", state 2118, "((i<2))"
+       line 259, "pan.___", state 2118, "((i>=2))"
+       line 266, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 266, "pan.___", state 2122, "else"
+       line 431, "pan.___", state 2124, "(1)"
+       line 431, "pan.___", state 2124, "(1)"
+       line 644, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 644, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 644, "pan.___", state 2129, "(1)"
+       line 272, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2180, "(1)"
+       line 253, "pan.___", state 2188, "(1)"
+       line 257, "pan.___", state 2200, "(1)"
+       line 261, "pan.___", state 2208, "(1)"
+       line 411, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2290, "(1)"
+       line 253, "pan.___", state 2298, "(1)"
+       line 257, "pan.___", state 2310, "(1)"
+       line 261, "pan.___", state 2318, "(1)"
+       line 272, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2387, "(1)"
+       line 253, "pan.___", state 2395, "(1)"
+       line 257, "pan.___", state 2407, "(1)"
+       line 261, "pan.___", state 2415, "(1)"
+       line 411, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2497, "(1)"
+       line 253, "pan.___", state 2505, "(1)"
+       line 257, "pan.___", state 2517, "(1)"
+       line 261, "pan.___", state 2525, "(1)"
+       line 411, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 2608, "(1)"
+       line 253, "pan.___", state 2616, "(1)"
+       line 257, "pan.___", state 2628, "(1)"
+       line 261, "pan.___", state 2636, "(1)"
+       line 249, "pan.___", state 2667, "(1)"
+       line 257, "pan.___", state 2687, "(1)"
+       line 261, "pan.___", state 2695, "(1)"
+       line 249, "pan.___", state 2710, "(1)"
+       line 253, "pan.___", state 2718, "(1)"
+       line 257, "pan.___", state 2730, "(1)"
+       line 261, "pan.___", state 2738, "(1)"
+       line 898, "pan.___", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 411, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 82, "(1)"
+       line 253, "pan.___", state 90, "(1)"
+       line 257, "pan.___", state 102, "(1)"
+       line 272, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 257, "(1)"
+       line 253, "pan.___", state 265, "(1)"
+       line 257, "pan.___", state 277, "(1)"
+       line 261, "pan.___", state 285, "(1)"
+       line 415, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 378, "(1)"
+       line 257, "pan.___", state 390, "(1)"
+       line 261, "pan.___", state 398, "(1)"
+       line 415, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 499, "(1)"
+       line 257, "pan.___", state 511, "(1)"
+       line 261, "pan.___", state 519, "(1)"
+       line 415, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 610, "(1)"
+       line 257, "pan.___", state 622, "(1)"
+       line 261, "pan.___", state 630, "(1)"
+       line 415, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 420, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 424, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 253, "pan.___", state 723, "(1)"
+       line 257, "pan.___", state 735, "(1)"
+       line 261, "pan.___", state 743, "(1)"
+       line 272, "pan.___", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 276, "pan.___", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 820, "(1)"
+       line 284, "pan.___", state 827, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 843, "(1)"
+       line 253, "pan.___", state 851, "(1)"
+       line 257, "pan.___", state 863, "(1)"
+       line 261, "pan.___", state 871, "(1)"
+       line 276, "pan.___", state 896, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 909, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 918, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 934, "(1)"
+       line 253, "pan.___", state 942, "(1)"
+       line 257, "pan.___", state 954, "(1)"
+       line 261, "pan.___", state 962, "(1)"
+       line 276, "pan.___", state 987, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1000, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1009, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1025, "(1)"
+       line 253, "pan.___", state 1033, "(1)"
+       line 257, "pan.___", state 1045, "(1)"
+       line 261, "pan.___", state 1053, "(1)"
+       line 276, "pan.___", state 1078, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 280, "pan.___", state 1091, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 284, "pan.___", state 1100, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 249, "pan.___", state 1116, "(1)"
+       line 253, "pan.___", state 1124, "(1)"
+       line 257, "pan.___", state 1136, "(1)"
+       line 261, "pan.___", state 1144, "(1)"
+       line 1237, "pan.___", state 1159, "-end-"
+       (71 of 1159 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1302, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 1.4e+03 seconds
+pan: rate 10667.087 states/second
+pan: avg transition delay 2.9553e-06 usec
+cp .input.spin urcu_progress_writer.spin.input
+cp .input.spin.trail urcu_progress_writer.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer.spin.input
new file mode 100644 (file)
index 0000000..323cc66
--- /dev/null
@@ -0,0 +1,1273 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
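+/* True when reader 0 has observed freed (poisoned) memory in either of its reads. */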
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. One-hot bit encoding is used per variable to
+ * save state space. The bits act as triggers to execute the instructions that
+ * take those variables as input; leaving bits set inhibits instruction
+ * execution. This scheme makes instruction disabling and dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
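+
+/*
+ * Illustrative pattern (sketch only; DEP_A, DEP_B and MY_TOKEN are placeholder
+ * token names): an instruction guard consumes the tokens of the instructions
+ * it depends on and lists its own token as "notbits" so it cannot fire twice:
+ *
+ *   :: CONSUME_TOKENS(proc, DEP_A | DEP_B, MY_TOKEN) ->
+ *           ...instruction body...
+ *           PRODUCE_TOKENS(proc, MY_TOKEN);
+ */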
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it remains necessary when writing
+ * multiple times to the same OOO memory model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g.: branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order
+ * volatile accesses so they appear in the right order on a given CPU. They can
+ * still be reordered by the CPU's instruction scheduling. This therefore cannot
+ * be considered a dependency.
+ *
+ * References:
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
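+
+/*
+ * Quick illustration of the data dependency classes above (sketch only, with
+ * placeholder variables a, b, c):
+ *
+ *   b = a; ...; c = b;     RAW  (c must observe the value written to b)
+ *   c = a; ...; a = 1;     WAR  (the write must not overtake the earlier read)
+ *   a = 1; ...; a = 2;     WAW  (the final value must come from the last write)
+ */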
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework behave as if
+ * there were a core-synchronizing instruction between loops. To see the effect
+ * of loop unrolling, loops must be unrolled manually. Note that if loops
+ * end or start with a core-synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered; cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * Randomly decide whether or not to propagate the cache to/from memory.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
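+
+/*
+ * Typical life cycle of a cached variable in this model (summary):
+ * WRITE_CACHED_VAR() updates the local copy and marks the cache line dirty;
+ * ooo_mem() may or may not flush it to the mem_ backing variable at any point;
+ * smp_wmb()/smp_mb() force the flush, after which other processes pick up the
+ * new value through CACHE_READ_FROM_MEM().
+ */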
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely removing the
+ * memory barriers and their dependencies from the read side. One at a time
+ * (in different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles in which the writer busy-loops
+                * waiting for the reader and sending barrier requests, while the
+                * reader keeps servicing them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
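+
+/*
+ * Barrier signalling protocol (summary): the writer's smp_mb_send() executes
+ * its own smp_mb(), raises reader_barrier[i] for each reader and busy-waits
+ * until the flag is cleared; the reader's smp_mb_recv() either services the
+ * request (smp_mb(), then clear the flag) or ignores it, in which case the
+ * writer keeps spinning at the progress label so that such cycles are not
+ * reported as non-progress.
+ */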
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
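+/*
+ * Models out-of-order memory: at any point, dirty cache lines may or may not
+ * be flushed to memory and, when HAVE_OOO_CACHE_READ is defined (Alpha), clean
+ * lines may or may not be refreshed from memory; otherwise an smp_rmb() is
+ * performed instead.
+ */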
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
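+
+/*
+ * One-hot tokens for the steps of the read-lock body below: read the
+ * per-reader nesting count, take the outermost/nested branch, and (in the
+ * outermost case) prefetch the global grace-period counter before storing it
+ * back into the per-reader count.
+ */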
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
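+
+/*
+ * Worked example of the bit layout above (as read from the defines): with
+ * READ_LOCK_BASE = 1, PROCEDURE_READ_LOCK produces READ_PROD_A_READ << 1
+ * (bit 1) up to READ_PROD_C_IF_TRUE_READ << 1 (bit 4), plus the
+ * READ_LOCK_OUT producetoken (bit 5). The intermediate branch bits are
+ * deliberately absent from READ_PROC_ALL_TOKENS, which is why the clear
+ * mask (1 << 30) - 1 covers bits 0 to 29: it also resets those branch
+ * tokens between loop iterations.
+ */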
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * The signal-based memory barrier will only execute at points
+                * where the execution appears to be in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note: we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation keeps the
+                        * barrier unconditionally, because the performance impact of adding a
+                        * branch in the common case does not justify removing it for the
+                        * nested case only.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop: second consecutive lock */
+                       /* reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops:
+        * RAW dependency between
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
+        * and
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * across loops.
+        * _WHEN THE MB()s are in place_, they fully order the generation
+        * pointer read wrt the active reader count read, which ensures one
+        * loop's execution will not spill into the next.
+        * However, when the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one
+        * loop's execution from spilling into the other's.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
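+
+/*
+ * Rough program-order view of the reader modelled above (each step may be
+ * reordered only as permitted by the token dependencies): lock
+ * (READ_LOCK_OUT), nested lock (READ_LOCK_NESTED_OUT), read of rcu_ptr
+ * (READ_PROC_READ_GEN), dependent read of rcu_data[] (READ_PROC_ACCESS_GEN),
+ * nested unlock (READ_UNLOCK_NESTED_OUT), unlock (READ_UNLOCK_OUT), followed
+ * by a second, non-nested critical section (the *_UNROLL tokens).
+ */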
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could believe the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
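+
+/*
+ * Sanity check on the layout (derived from the defines above): the highest
+ * writer token is WRITE_FREE (bit 14), so the clear mask (1 << 15) - 1
+ * covers bits 0 to 14. This also clears WRITE_PROC_FIRST_WAIT_LOOP (bit 8)
+ * and WRITE_PROC_SECOND_WAIT_LOOP (bit 12), which are intentionally not
+ * part of WRITE_PROC_ALL_TOKENS.
+ */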
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the single
+                                * flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by waiting
+                        * for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the second
+                        * read, which was done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note: the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks that the data entry read is poisoned, it is OK if we
+                * do not check for "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Since the reader loops forever, let the writer also busy-loop with
+        * a progress label here so that, under weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is correct. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.define b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.log b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.log
new file mode 100644 (file)
index 0000000..43b08ac
--- /dev/null
@@ -0,0 +1,647 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer_error.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1296)
+depth 23: Claim reached state 9 (line 1301)
+depth 1404: Claim reached state 9 (line 1300)
+pan: acceptance cycle (at depth 1950)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 4997, errors: 1
+    64372 states, stored (197067 visited)
+  2733089 states, matched
+  2930156 transitions (= visited+matched)
+ 44244571 atomic steps
+hash conflicts:     43483 (resolved)
+
+Stats on memory usage (in Megabytes):
+    7.121      equivalent memory usage for states (stored*(State-vector + overhead))
+    6.154      actual memory usage for states (compression: 86.41%)
+               state-vector as stored = 72 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  471.818      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 412, "pan.___", state 17, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 49, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 63, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 81, "(1)"
+       line 258, "pan.___", state 101, "(1)"
+       line 262, "pan.___", state 109, "(1)"
+       line 598, "pan.___", state 128, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 412, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 199, "(1)"
+       line 258, "pan.___", state 219, "(1)"
+       line 262, "pan.___", state 227, "(1)"
+       line 412, "pan.___", state 246, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 278, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 292, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 310, "(1)"
+       line 258, "pan.___", state 330, "(1)"
+       line 262, "pan.___", state 338, "(1)"
+       line 412, "pan.___", state 359, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 361, "(1)"
+       line 412, "pan.___", state 362, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 362, "else"
+       line 412, "pan.___", state 365, "(1)"
+       line 416, "pan.___", state 373, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 375, "(1)"
+       line 416, "pan.___", state 376, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 376, "else"
+       line 416, "pan.___", state 379, "(1)"
+       line 416, "pan.___", state 380, "(1)"
+       line 416, "pan.___", state 380, "(1)"
+       line 414, "pan.___", state 385, "((i<1))"
+       line 414, "pan.___", state 385, "((i>=1))"
+       line 421, "pan.___", state 391, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 393, "(1)"
+       line 421, "pan.___", state 394, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 394, "else"
+       line 421, "pan.___", state 397, "(1)"
+       line 421, "pan.___", state 398, "(1)"
+       line 421, "pan.___", state 398, "(1)"
+       line 425, "pan.___", state 405, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 407, "(1)"
+       line 425, "pan.___", state 408, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 408, "else"
+       line 425, "pan.___", state 411, "(1)"
+       line 425, "pan.___", state 412, "(1)"
+       line 425, "pan.___", state 412, "(1)"
+       line 423, "pan.___", state 417, "((i<2))"
+       line 423, "pan.___", state 417, "((i>=2))"
+       line 250, "pan.___", state 423, "(1)"
+       line 254, "pan.___", state 431, "(1)"
+       line 254, "pan.___", state 432, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 432, "else"
+       line 252, "pan.___", state 437, "((i<1))"
+       line 252, "pan.___", state 437, "((i>=1))"
+       line 258, "pan.___", state 443, "(1)"
+       line 258, "pan.___", state 444, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 444, "else"
+       line 262, "pan.___", state 451, "(1)"
+       line 262, "pan.___", state 452, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 452, "else"
+       line 260, "pan.___", state 457, "((i<2))"
+       line 260, "pan.___", state 457, "((i>=2))"
+       line 267, "pan.___", state 461, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 461, "else"
+       line 432, "pan.___", state 463, "(1)"
+       line 432, "pan.___", state 463, "(1)"
+       line 598, "pan.___", state 466, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 598, "pan.___", state 467, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 598, "pan.___", state 468, "(1)"
+       line 273, "pan.___", state 472, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 483, "(1)"
+       line 281, "pan.___", state 494, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 503, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 519, "(1)"
+       line 254, "pan.___", state 527, "(1)"
+       line 258, "pan.___", state 539, "(1)"
+       line 262, "pan.___", state 547, "(1)"
+       line 412, "pan.___", state 565, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 579, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 597, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 611, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 629, "(1)"
+       line 254, "pan.___", state 637, "(1)"
+       line 258, "pan.___", state 649, "(1)"
+       line 262, "pan.___", state 657, "(1)"
+       line 412, "pan.___", state 683, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 715, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 729, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 747, "(1)"
+       line 258, "pan.___", state 767, "(1)"
+       line 262, "pan.___", state 775, "(1)"
+       line 412, "pan.___", state 794, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 796, "(1)"
+       line 412, "pan.___", state 797, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 797, "else"
+       line 412, "pan.___", state 800, "(1)"
+       line 416, "pan.___", state 808, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 810, "(1)"
+       line 416, "pan.___", state 811, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 811, "else"
+       line 416, "pan.___", state 814, "(1)"
+       line 416, "pan.___", state 815, "(1)"
+       line 416, "pan.___", state 815, "(1)"
+       line 414, "pan.___", state 820, "((i<1))"
+       line 414, "pan.___", state 820, "((i>=1))"
+       line 421, "pan.___", state 826, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 828, "(1)"
+       line 421, "pan.___", state 829, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 829, "else"
+       line 421, "pan.___", state 832, "(1)"
+       line 421, "pan.___", state 833, "(1)"
+       line 421, "pan.___", state 833, "(1)"
+       line 425, "pan.___", state 840, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 842, "(1)"
+       line 425, "pan.___", state 843, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 843, "else"
+       line 425, "pan.___", state 846, "(1)"
+       line 425, "pan.___", state 847, "(1)"
+       line 425, "pan.___", state 847, "(1)"
+       line 423, "pan.___", state 852, "((i<2))"
+       line 423, "pan.___", state 852, "((i>=2))"
+       line 250, "pan.___", state 858, "(1)"
+       line 254, "pan.___", state 866, "(1)"
+       line 254, "pan.___", state 867, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 867, "else"
+       line 252, "pan.___", state 872, "((i<1))"
+       line 252, "pan.___", state 872, "((i>=1))"
+       line 258, "pan.___", state 878, "(1)"
+       line 258, "pan.___", state 879, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 879, "else"
+       line 262, "pan.___", state 886, "(1)"
+       line 262, "pan.___", state 887, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 887, "else"
+       line 260, "pan.___", state 892, "((i<2))"
+       line 260, "pan.___", state 892, "((i>=2))"
+       line 267, "pan.___", state 896, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 896, "else"
+       line 432, "pan.___", state 898, "(1)"
+       line 432, "pan.___", state 898, "(1)"
+       line 606, "pan.___", state 902, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 412, "pan.___", state 907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 921, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 971, "(1)"
+       line 254, "pan.___", state 979, "(1)"
+       line 258, "pan.___", state 991, "(1)"
+       line 262, "pan.___", state 999, "(1)"
+       line 412, "pan.___", state 1021, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1053, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1067, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1085, "(1)"
+       line 258, "pan.___", state 1105, "(1)"
+       line 262, "pan.___", state 1113, "(1)"
+       line 412, "pan.___", state 1136, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1168, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1182, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1200, "(1)"
+       line 258, "pan.___", state 1220, "(1)"
+       line 262, "pan.___", state 1228, "(1)"
+       line 412, "pan.___", state 1247, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1279, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1293, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1311, "(1)"
+       line 258, "pan.___", state 1331, "(1)"
+       line 262, "pan.___", state 1339, "(1)"
+       line 273, "pan.___", state 1360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 1382, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 1391, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1407, "(1)"
+       line 254, "pan.___", state 1415, "(1)"
+       line 258, "pan.___", state 1427, "(1)"
+       line 262, "pan.___", state 1435, "(1)"
+       line 412, "pan.___", state 1453, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 1467, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1485, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1499, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1517, "(1)"
+       line 254, "pan.___", state 1525, "(1)"
+       line 258, "pan.___", state 1537, "(1)"
+       line 262, "pan.___", state 1545, "(1)"
+       line 412, "pan.___", state 1564, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 1578, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1596, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1610, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1628, "(1)"
+       line 254, "pan.___", state 1636, "(1)"
+       line 258, "pan.___", state 1648, "(1)"
+       line 262, "pan.___", state 1656, "(1)"
+       line 412, "pan.___", state 1678, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1710, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1724, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1742, "(1)"
+       line 258, "pan.___", state 1762, "(1)"
+       line 262, "pan.___", state 1770, "(1)"
+       line 645, "pan.___", state 1789, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 412, "pan.___", state 1796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1828, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1842, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1860, "(1)"
+       line 258, "pan.___", state 1880, "(1)"
+       line 262, "pan.___", state 1888, "(1)"
+       line 412, "pan.___", state 1907, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 1939, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 1953, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1971, "(1)"
+       line 258, "pan.___", state 1991, "(1)"
+       line 262, "pan.___", state 1999, "(1)"
+       line 412, "pan.___", state 2020, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 2022, "(1)"
+       line 412, "pan.___", state 2023, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 2023, "else"
+       line 412, "pan.___", state 2026, "(1)"
+       line 416, "pan.___", state 2034, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2036, "(1)"
+       line 416, "pan.___", state 2037, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 2037, "else"
+       line 416, "pan.___", state 2040, "(1)"
+       line 416, "pan.___", state 2041, "(1)"
+       line 416, "pan.___", state 2041, "(1)"
+       line 414, "pan.___", state 2046, "((i<1))"
+       line 414, "pan.___", state 2046, "((i>=1))"
+       line 421, "pan.___", state 2052, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2054, "(1)"
+       line 421, "pan.___", state 2055, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 2055, "else"
+       line 421, "pan.___", state 2058, "(1)"
+       line 421, "pan.___", state 2059, "(1)"
+       line 421, "pan.___", state 2059, "(1)"
+       line 425, "pan.___", state 2066, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2068, "(1)"
+       line 425, "pan.___", state 2069, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 2069, "else"
+       line 425, "pan.___", state 2072, "(1)"
+       line 425, "pan.___", state 2073, "(1)"
+       line 425, "pan.___", state 2073, "(1)"
+       line 423, "pan.___", state 2078, "((i<2))"
+       line 423, "pan.___", state 2078, "((i>=2))"
+       line 250, "pan.___", state 2084, "(1)"
+       line 254, "pan.___", state 2092, "(1)"
+       line 254, "pan.___", state 2093, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 2093, "else"
+       line 252, "pan.___", state 2098, "((i<1))"
+       line 252, "pan.___", state 2098, "((i>=1))"
+       line 258, "pan.___", state 2104, "(1)"
+       line 258, "pan.___", state 2105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 2105, "else"
+       line 262, "pan.___", state 2112, "(1)"
+       line 262, "pan.___", state 2113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 2113, "else"
+       line 260, "pan.___", state 2118, "((i<2))"
+       line 260, "pan.___", state 2118, "((i>=2))"
+       line 267, "pan.___", state 2122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 2122, "else"
+       line 432, "pan.___", state 2124, "(1)"
+       line 432, "pan.___", state 2124, "(1)"
+       line 645, "pan.___", state 2127, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 645, "pan.___", state 2128, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 645, "pan.___", state 2129, "(1)"
+       line 273, "pan.___", state 2133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 2155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 2164, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2180, "(1)"
+       line 254, "pan.___", state 2188, "(1)"
+       line 258, "pan.___", state 2200, "(1)"
+       line 262, "pan.___", state 2208, "(1)"
+       line 412, "pan.___", state 2226, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2240, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2258, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2272, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2290, "(1)"
+       line 254, "pan.___", state 2298, "(1)"
+       line 258, "pan.___", state 2310, "(1)"
+       line 262, "pan.___", state 2318, "(1)"
+       line 273, "pan.___", state 2340, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 2349, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 2362, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 2371, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2387, "(1)"
+       line 254, "pan.___", state 2395, "(1)"
+       line 258, "pan.___", state 2407, "(1)"
+       line 262, "pan.___", state 2415, "(1)"
+       line 412, "pan.___", state 2433, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2447, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2465, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2479, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2497, "(1)"
+       line 254, "pan.___", state 2505, "(1)"
+       line 258, "pan.___", state 2517, "(1)"
+       line 262, "pan.___", state 2525, "(1)"
+       line 412, "pan.___", state 2544, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 2558, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 2576, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 2590, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 2608, "(1)"
+       line 254, "pan.___", state 2616, "(1)"
+       line 258, "pan.___", state 2628, "(1)"
+       line 262, "pan.___", state 2636, "(1)"
+       line 250, "pan.___", state 2667, "(1)"
+       line 258, "pan.___", state 2687, "(1)"
+       line 262, "pan.___", state 2695, "(1)"
+       line 250, "pan.___", state 2710, "(1)"
+       line 254, "pan.___", state 2718, "(1)"
+       line 258, "pan.___", state 2730, "(1)"
+       line 262, "pan.___", state 2738, "(1)"
+       line 899, "pan.___", state 2755, "-end-"
+       (259 of 2755 states)
+unreached in proctype urcu_writer
+       line 412, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 24, "(1)"
+       line 416, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 38, "(1)"
+       line 416, "pan.___", state 39, "(1)"
+       line 416, "pan.___", state 39, "(1)"
+       line 414, "pan.___", state 44, "((i<1))"
+       line 414, "pan.___", state 44, "((i>=1))"
+       line 421, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 56, "(1)"
+       line 421, "pan.___", state 57, "(1)"
+       line 421, "pan.___", state 57, "(1)"
+       line 425, "pan.___", state 64, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 70, "(1)"
+       line 425, "pan.___", state 71, "(1)"
+       line 425, "pan.___", state 71, "(1)"
+       line 423, "pan.___", state 76, "((i<2))"
+       line 423, "pan.___", state 76, "((i>=2))"
+       line 250, "pan.___", state 82, "(1)"
+       line 254, "pan.___", state 90, "(1)"
+       line 254, "pan.___", state 91, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 91, "else"
+       line 252, "pan.___", state 96, "((i<1))"
+       line 252, "pan.___", state 96, "((i>=1))"
+       line 258, "pan.___", state 102, "(1)"
+       line 258, "pan.___", state 103, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 103, "else"
+       line 262, "pan.___", state 110, "(1)"
+       line 262, "pan.___", state 111, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 111, "else"
+       line 260, "pan.___", state 116, "((i<2))"
+       line 260, "pan.___", state 116, "((i>=2))"
+       line 267, "pan.___", state 120, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 120, "else"
+       line 432, "pan.___", state 122, "(1)"
+       line 432, "pan.___", state 122, "(1)"
+       line 273, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 148, "((i<1))"
+       line 275, "pan.___", state 148, "((i>=1))"
+       line 281, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 1022, "pan.___", state 181, "old_data = cached_rcu_ptr.val[_pid]"
+       line 412, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 199, "(1)"
+       line 416, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 213, "(1)"
+       line 416, "pan.___", state 214, "(1)"
+       line 416, "pan.___", state 214, "(1)"
+       line 414, "pan.___", state 219, "((i<1))"
+       line 414, "pan.___", state 219, "((i>=1))"
+       line 421, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 231, "(1)"
+       line 421, "pan.___", state 232, "(1)"
+       line 421, "pan.___", state 232, "(1)"
+       line 425, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 245, "(1)"
+       line 425, "pan.___", state 246, "(1)"
+       line 425, "pan.___", state 246, "(1)"
+       line 423, "pan.___", state 251, "((i<2))"
+       line 423, "pan.___", state 251, "((i>=2))"
+       line 250, "pan.___", state 257, "(1)"
+       line 254, "pan.___", state 265, "(1)"
+       line 254, "pan.___", state 266, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 266, "else"
+       line 252, "pan.___", state 271, "((i<1))"
+       line 252, "pan.___", state 271, "((i>=1))"
+       line 258, "pan.___", state 277, "(1)"
+       line 258, "pan.___", state 278, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 278, "else"
+       line 262, "pan.___", state 285, "(1)"
+       line 262, "pan.___", state 286, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 286, "else"
+       line 260, "pan.___", state 291, "((i<2))"
+       line 260, "pan.___", state 291, "((i>=2))"
+       line 267, "pan.___", state 295, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 295, "else"
+       line 432, "pan.___", state 297, "(1)"
+       line 432, "pan.___", state 297, "(1)"
+       line 412, "pan.___", state 308, "(1)"
+       line 412, "pan.___", state 309, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 309, "else"
+       line 412, "pan.___", state 312, "(1)"
+       line 416, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 326, "(1)"
+       line 416, "pan.___", state 327, "(1)"
+       line 416, "pan.___", state 327, "(1)"
+       line 414, "pan.___", state 332, "((i<1))"
+       line 414, "pan.___", state 332, "((i>=1))"
+       line 421, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 344, "(1)"
+       line 421, "pan.___", state 345, "(1)"
+       line 421, "pan.___", state 345, "(1)"
+       line 425, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 358, "(1)"
+       line 425, "pan.___", state 359, "(1)"
+       line 425, "pan.___", state 359, "(1)"
+       line 423, "pan.___", state 364, "((i<2))"
+       line 423, "pan.___", state 364, "((i>=2))"
+       line 250, "pan.___", state 370, "(1)"
+       line 254, "pan.___", state 378, "(1)"
+       line 254, "pan.___", state 379, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 379, "else"
+       line 252, "pan.___", state 384, "((i<1))"
+       line 252, "pan.___", state 384, "((i>=1))"
+       line 258, "pan.___", state 390, "(1)"
+       line 258, "pan.___", state 391, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 391, "else"
+       line 262, "pan.___", state 398, "(1)"
+       line 262, "pan.___", state 399, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 399, "else"
+       line 260, "pan.___", state 404, "((i<2))"
+       line 260, "pan.___", state 404, "((i>=2))"
+       line 267, "pan.___", state 408, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 408, "else"
+       line 432, "pan.___", state 410, "(1)"
+       line 432, "pan.___", state 410, "(1)"
+       line 412, "pan.___", state 423, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 425, "(1)"
+       line 412, "pan.___", state 426, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 412, "pan.___", state 426, "else"
+       line 412, "pan.___", state 429, "(1)"
+       line 416, "pan.___", state 437, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 439, "(1)"
+       line 416, "pan.___", state 440, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 416, "pan.___", state 440, "else"
+       line 416, "pan.___", state 443, "(1)"
+       line 416, "pan.___", state 444, "(1)"
+       line 416, "pan.___", state 444, "(1)"
+       line 414, "pan.___", state 449, "((i<1))"
+       line 414, "pan.___", state 449, "((i>=1))"
+       line 421, "pan.___", state 455, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 457, "(1)"
+       line 421, "pan.___", state 458, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 421, "pan.___", state 458, "else"
+       line 421, "pan.___", state 461, "(1)"
+       line 421, "pan.___", state 462, "(1)"
+       line 421, "pan.___", state 462, "(1)"
+       line 425, "pan.___", state 469, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 471, "(1)"
+       line 425, "pan.___", state 472, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 425, "pan.___", state 472, "else"
+       line 425, "pan.___", state 475, "(1)"
+       line 425, "pan.___", state 476, "(1)"
+       line 425, "pan.___", state 476, "(1)"
+       line 423, "pan.___", state 481, "((i<2))"
+       line 423, "pan.___", state 481, "((i>=2))"
+       line 250, "pan.___", state 487, "(1)"
+       line 254, "pan.___", state 495, "(1)"
+       line 254, "pan.___", state 496, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 496, "else"
+       line 252, "pan.___", state 501, "((i<1))"
+       line 252, "pan.___", state 501, "((i>=1))"
+       line 258, "pan.___", state 507, "(1)"
+       line 258, "pan.___", state 508, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 508, "else"
+       line 262, "pan.___", state 515, "(1)"
+       line 262, "pan.___", state 516, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 516, "else"
+       line 267, "pan.___", state 525, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 525, "else"
+       line 432, "pan.___", state 527, "(1)"
+       line 432, "pan.___", state 527, "(1)"
+       line 412, "pan.___", state 533, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 539, "(1)"
+       line 416, "pan.___", state 547, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 553, "(1)"
+       line 416, "pan.___", state 554, "(1)"
+       line 416, "pan.___", state 554, "(1)"
+       line 414, "pan.___", state 559, "((i<1))"
+       line 414, "pan.___", state 559, "((i>=1))"
+       line 421, "pan.___", state 565, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 571, "(1)"
+       line 421, "pan.___", state 572, "(1)"
+       line 421, "pan.___", state 572, "(1)"
+       line 425, "pan.___", state 579, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 585, "(1)"
+       line 425, "pan.___", state 586, "(1)"
+       line 425, "pan.___", state 586, "(1)"
+       line 423, "pan.___", state 591, "((i<2))"
+       line 423, "pan.___", state 591, "((i>=2))"
+       line 250, "pan.___", state 597, "(1)"
+       line 254, "pan.___", state 605, "(1)"
+       line 254, "pan.___", state 606, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 606, "else"
+       line 252, "pan.___", state 611, "((i<1))"
+       line 252, "pan.___", state 611, "((i>=1))"
+       line 258, "pan.___", state 617, "(1)"
+       line 258, "pan.___", state 618, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 618, "else"
+       line 262, "pan.___", state 625, "(1)"
+       line 262, "pan.___", state 626, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 626, "else"
+       line 267, "pan.___", state 635, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 635, "else"
+       line 432, "pan.___", state 637, "(1)"
+       line 432, "pan.___", state 637, "(1)"
+       line 412, "pan.___", state 644, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 412, "pan.___", state 650, "(1)"
+       line 416, "pan.___", state 658, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 416, "pan.___", state 664, "(1)"
+       line 416, "pan.___", state 665, "(1)"
+       line 416, "pan.___", state 665, "(1)"
+       line 414, "pan.___", state 670, "((i<1))"
+       line 414, "pan.___", state 670, "((i>=1))"
+       line 421, "pan.___", state 676, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 682, "(1)"
+       line 421, "pan.___", state 683, "(1)"
+       line 421, "pan.___", state 683, "(1)"
+       line 425, "pan.___", state 690, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 696, "(1)"
+       line 425, "pan.___", state 697, "(1)"
+       line 425, "pan.___", state 697, "(1)"
+       line 423, "pan.___", state 702, "((i<2))"
+       line 423, "pan.___", state 702, "((i>=2))"
+       line 250, "pan.___", state 708, "(1)"
+       line 254, "pan.___", state 716, "(1)"
+       line 254, "pan.___", state 717, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 717, "else"
+       line 252, "pan.___", state 722, "((i<1))"
+       line 252, "pan.___", state 722, "((i>=1))"
+       line 258, "pan.___", state 728, "(1)"
+       line 258, "pan.___", state 729, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 729, "else"
+       line 262, "pan.___", state 736, "(1)"
+       line 262, "pan.___", state 737, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 737, "else"
+       line 260, "pan.___", state 742, "((i<2))"
+       line 260, "pan.___", state 742, "((i>=2))"
+       line 267, "pan.___", state 746, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 746, "else"
+       line 432, "pan.___", state 748, "(1)"
+       line 432, "pan.___", state 748, "(1)"
+       line 416, "pan.___", state 771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 254, "pan.___", state 829, "(1)"
+       line 258, "pan.___", state 841, "(1)"
+       line 262, "pan.___", state 849, "(1)"
+       line 416, "pan.___", state 887, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 421, "pan.___", state 905, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 425, "pan.___", state 919, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 254, "pan.___", state 945, "(1)"
+       line 258, "pan.___", state 957, "(1)"
+       line 262, "pan.___", state 965, "(1)"
+       line 273, "pan.___", state 1009, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 277, "pan.___", state 1018, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1026, "((i<1))"
+       line 275, "pan.___", state 1026, "((i>=1))"
+       line 281, "pan.___", state 1033, "(1)"
+       line 281, "pan.___", state 1034, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 281, "pan.___", state 1034, "else"
+       line 285, "pan.___", state 1040, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 283, "pan.___", state 1048, "((i<2))"
+       line 283, "pan.___", state 1048, "((i>=2))"
+       line 250, "pan.___", state 1056, "(1)"
+       line 254, "pan.___", state 1064, "(1)"
+       line 254, "pan.___", state 1065, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 254, "pan.___", state 1065, "else"
+       line 252, "pan.___", state 1070, "((i<1))"
+       line 252, "pan.___", state 1070, "((i>=1))"
+       line 258, "pan.___", state 1076, "(1)"
+       line 258, "pan.___", state 1077, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 258, "pan.___", state 1077, "else"
+       line 262, "pan.___", state 1084, "(1)"
+       line 262, "pan.___", state 1085, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 262, "pan.___", state 1085, "else"
+       line 267, "pan.___", state 1094, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 267, "pan.___", state 1094, "else"
+       line 277, "pan.___", state 1109, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 281, "pan.___", state 1122, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 285, "pan.___", state 1131, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 250, "pan.___", state 1147, "(1)"
+       line 254, "pan.___", state 1155, "(1)"
+       line 258, "pan.___", state 1167, "(1)"
+       line 262, "pan.___", state 1175, "(1)"
+       line 1238, "pan.___", state 1190, "-end-"
+       (192 of 1190 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1303, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 8.1 seconds
+pan: rate 24329.259 states/second
+pan: avg transition delay 2.7644e-06 usec
+cp .input.spin urcu_progress_writer_error.spin.input
+cp .input.spin.trail urcu_progress_writer_error.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow-intel-no-ipi'
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input
new file mode 100644 (file)
index 0000000..67fec75
--- /dev/null
@@ -0,0 +1,1274 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
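+
+/*
+ * Worked example of this layout (values chosen for illustration only): bits
+ * 0-6 of the per-reader counter hold the nesting count and bit 7 is the
+ * grace-period phase bit, so 0x03 means nesting depth 3 in phase 0, while
+ * 0x81 means nesting depth 1 in phase 1.
+ */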
+
+//disabled
+//#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * This scheme makes instruction disabling and dependency fall-back automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
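+
+/*
+ * Minimal usage sketch of the token macros (illustrative only; the STEP_A,
+ * STEP_B and flow names below are assumptions, not part of this model):
+ *
+ *     #define STEP_A (1 << 0)
+ *     #define STEP_B (1 << 1)
+ *     int flow;
+ *
+ *     do
+ *     :: CONSUME_TOKENS(flow, 0, STEP_A) ->
+ *             ... step A body ...
+ *             PRODUCE_TOKENS(flow, STEP_A);
+ *     :: CONSUME_TOKENS(flow, STEP_A, STEP_B) ->
+ *             ... step B body, enabled only once A has run ...
+ *             PRODUCE_TOKENS(flow, STEP_B);
+ *     :: CONSUME_TOKENS(flow, STEP_A | STEP_B, 0) ->
+ *             CLEAR_TOKENS(flow, STEP_A | STEP_B);
+ *             break
+ *     od;
+ */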
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can usually remove this dependency, but it cannot when multiple writes to
+ * the same OOO mem model variable are required.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution, e.g. branches.
+ *
+ * Useful considerations for joining dependencies after a branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just like pre-dominance, but with the arcs of the data flow inverted, and
+ * input vs output exchanged. Therefore, i post-dominating j ensures that every
+ * path passing through j will pass through i before reaching the output.
+ *
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
+ * Other considerations
+ *
+ * Note about the "volatile" keyword dependency: the compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can still be
+ * reordered by CPU instruction scheduling. This therefore cannot be
+ * considered a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
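+
+/*
+ * Quick illustration of the data dependency kinds above (hypothetical
+ * statements, not taken from this model):
+ *
+ *     b = a;  c = b + 1;      RAW: the second statement reads b written first
+ *     d = e;  e = 1;          WAR: the write to e follows a read of e
+ *     f = 1;  f = 2;          WAW: two writes to f, ordering matters
+ */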
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. Cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb force cache updates (write and read, respectively);
+ * smp_mb forces both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update memory (and thus other caches) if the cache line is dirty, or may do nothing.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
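+
+/*
+ * Usage sketch for the cached-variable macros (illustrative only; the
+ * variable name foo and the surrounding context are assumptions, not part of
+ * this model):
+ *
+ *     DECLARE_CACHED_VAR(byte, foo);
+ *     INIT_CACHED_VAR(foo, 0, j);           in an init block
+ *     WRITE_CACHED_VAR(foo, 1);             writes this process' cache, marks it dirty
+ *     CACHE_WRITE_TO_MEM(foo, get_pid());   flushes to memory only if dirty
+ *     CACHE_READ_FROM_MEM(foo, get_pid());  refreshes the cache only if not dirty
+ *     tmp = READ_CACHED_VAR(foo);           always reads this process' cache
+ */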
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling
+ * the memory barriers and their dependencies on the read side. One at a time
+ * (in different verification runs), we make a different instruction listen
+ * for signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles where the writer busy-loops
+                * waiting for the reader while sending barrier requests, and
+                * the reader keeps servicing them without continuing its own
+                * execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore the writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)            smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note: currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)           \
+       :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) ->        \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_TRUE                                          \
+                         | READ_PROD_C_IF_TRUE_READ    /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1);         \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+       /* Note : we remove the nested memory barrier from the read unlock
+        * model, given it is not usually needed. The implementation keeps the
+        * barrier unconditionally because the performance impact of adding a
+        * branch to skip it in the common case does not justify its removal.
+        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* Reading urcu_active_readers, which has been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_PROC_SECOND_MB         /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB,       /* mb() orders reads */
+                                           READ_LOCK_NESTED_OUT        /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT    /* RAW */
+                                           | READ_UNLOCK_OUT,          /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt the active reader count read, which
+        * ensures execution will not spill across loop executions.
+        * However, in the event the mb()s are removed (execution using a signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * from spilling its execution into the other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test the reader's progress here, because we are
+                * mainly interested in the writer's progress. The reader never
+                * blocks anyway. We have to test reader and writer progress
+                * separately, otherwise we could think the writer is making
+                * progress when it is in fact blocked by an always-progressing
+                * reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
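+
+/*
+ * Informally, a CONSUME_TOKENS(proc_urcu_writer, A, B) guard below enables an
+ * instruction once every token in A has been produced and no token in B has
+ * been produced yet; the matching PRODUCE_TOKENS() then marks that instruction
+ * as done. For instance, the guard
+ *   CONSUME_TOKENS(proc_urcu_writer, WRITE_DATA, WRITE_PROC_WMB)
+ * lets the wmb execute only after the data write, and only once. This is how
+ * out-of-order scheduling constrained by data/control dependencies is modeled.
+ */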
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep track of the current parity locally so
+                                * we don't add non-existent dependencies on the
+                                * global GP update. Needed to test the
+                                * single-flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
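+
+               /*
+                * Informally: with SINGLE_FLIP, the second flip's tokens are
+                * pre-produced, so only one parity flip of urcu_gp_ctr is
+                * modeled per writer loop. This configuration is used to test
+                * whether a single flip would be sufficient.
+                */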
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
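+                       /*
+                        * old_data now holds the previous rcu_ptr value: the
+                        * atomic read-then-write above models rcu_xchg_pointer(),
+                        * and old_data designates the entry that WRITE_FREE will
+                        * later poison.
+                        */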
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we always start by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
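+                       /*
+                        * Informally: keep looping while reader 0 is inside a
+                        * read-side critical section (non-zero nesting count in
+                        * tmp2) whose parity snapshot differs from cur_gp_val;
+                        * otherwise the first wait completes.
+                        */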
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+                       /* The memory barrier will invalidate the
+                        * second read, done as a prefetch. Note that all
+                        * instructions with side-effects depending on
+                        * WRITE_PROC_SECOND_READ_GP should also depend on
+                        * completion of this busy-waiting loop. */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WAIT |     //test  /* no dependency. Could pre-fetch, no side-effect. */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT                 /* dependency on first wait, because this
+                                                                        * instruction has globally observable
+                                                                        * side-effects.
+                                                                        */
+                                 | WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       //smp_mb(i);    /* TEST */
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : the Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill into the next loop execution. Given that the validation
+                * checks whether the data entry read is poisoned, it is ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given that the reader loops infinitely, let the writer also
+        * busy-loop with a progress label here so that, with weak fairness,
+        * we can test the writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Keep this after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input.trail b/formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input.trail
new file mode 100644 (file)
index 0000000..e5fdf5a
--- /dev/null
@@ -0,0 +1,5000 @@
+-2:3:-2
+-4:-4:-4
+1:0:4025
+2:3:3945
+3:3:3948
+4:3:3948
+5:3:3951
+6:3:3959
+7:3:3959
+8:3:3962
+9:3:3968
+10:3:3972
+11:3:3972
+12:3:3975
+13:3:3985
+14:3:3993
+15:3:3993
+16:3:3996
+17:3:4002
+18:3:4006
+19:3:4006
+20:3:4009
+21:3:4015
+22:3:4019
+23:3:4020
+24:0:4025
+25:3:4022
+26:0:4025
+27:2:2757
+28:0:4025
+29:2:2763
+30:0:4025
+31:2:2764
+32:0:4025
+33:2:2765
+34:0:4023
+35:2:2766
+36:0:4029
+37:2:2767
+38:0:4029
+39:2:2768
+40:2:2769
+41:2:2773
+42:2:2774
+43:2:2782
+44:2:2783
+45:2:2787
+46:2:2788
+47:2:2796
+48:2:2801
+49:2:2805
+50:2:2806
+51:2:2814
+52:2:2815
+53:2:2819
+54:2:2820
+55:2:2814
+56:2:2815
+57:2:2819
+58:2:2820
+59:2:2828
+60:2:2833
+61:2:2840
+62:2:2841
+63:2:2848
+64:2:2853
+65:2:2860
+66:2:2861
+67:2:2860
+68:2:2861
+69:2:2868
+70:2:2878
+71:0:4029
+72:2:2767
+73:0:4029
+74:2:2882
+75:2:2886
+76:2:2887
+77:2:2891
+78:2:2895
+79:2:2896
+80:2:2900
+81:2:2908
+82:2:2909
+83:2:2913
+84:2:2917
+85:2:2918
+86:2:2913
+87:2:2914
+88:2:2922
+89:0:4029
+90:2:2767
+91:0:4029
+92:2:2930
+93:2:2931
+94:2:2932
+95:0:4029
+96:2:2767
+97:0:4029
+98:2:2937
+99:0:4029
+100:2:3764
+101:2:3765
+102:2:3769
+103:2:3773
+104:2:3774
+105:2:3778
+106:2:3783
+107:2:3791
+108:2:3795
+109:2:3796
+110:2:3791
+111:2:3795
+112:2:3796
+113:2:3800
+114:2:3807
+115:2:3814
+116:2:3815
+117:2:3822
+118:2:3827
+119:2:3834
+120:2:3835
+121:2:3834
+122:2:3835
+123:2:3842
+124:2:3846
+125:0:4029
+126:2:2939
+127:2:3745
+128:0:4029
+129:2:2767
+130:0:4029
+131:2:2940
+132:0:4029
+133:2:2767
+134:0:4029
+135:2:2943
+136:2:2944
+137:2:2948
+138:2:2949
+139:2:2957
+140:2:2958
+141:2:2962
+142:2:2963
+143:2:2971
+144:2:2976
+145:2:2980
+146:2:2981
+147:2:2989
+148:2:2990
+149:2:2994
+150:2:2995
+151:2:2989
+152:2:2990
+153:2:2994
+154:2:2995
+155:2:3003
+156:2:3008
+157:2:3015
+158:2:3016
+159:2:3023
+160:2:3028
+161:2:3035
+162:2:3036
+163:2:3035
+164:2:3036
+165:2:3043
+166:2:3052
+167:0:4029
+168:2:2767
+169:0:4029
+170:2:3056
+171:2:3057
+172:2:3058
+173:2:3070
+174:2:3071
+175:2:3075
+176:2:3076
+177:2:3084
+178:2:3089
+179:2:3093
+180:2:3094
+181:2:3102
+182:2:3103
+183:2:3107
+184:2:3108
+185:2:3102
+186:2:3103
+187:2:3107
+188:2:3108
+189:2:3116
+190:2:3121
+191:2:3128
+192:2:3129
+193:2:3136
+194:2:3141
+195:2:3148
+196:2:3149
+197:2:3148
+198:2:3149
+199:2:3156
+200:2:3169
+201:2:3170
+202:0:4029
+203:2:2767
+204:0:4029
+205:2:3283
+206:2:3284
+207:2:3288
+208:2:3289
+209:2:3297
+210:2:3298
+211:2:3302
+212:2:3303
+213:2:3311
+214:2:3316
+215:2:3320
+216:2:3321
+217:2:3329
+218:2:3330
+219:2:3334
+220:2:3335
+221:2:3329
+222:2:3330
+223:2:3334
+224:2:3335
+225:2:3343
+226:2:3348
+227:2:3355
+228:2:3356
+229:2:3363
+230:2:3368
+231:2:3375
+232:2:3376
+233:2:3375
+234:2:3376
+235:2:3383
+236:0:4029
+237:2:2767
+238:0:4029
+239:2:3394
+240:2:3395
+241:2:3399
+242:2:3400
+243:2:3408
+244:2:3409
+245:2:3413
+246:2:3414
+247:2:3422
+248:2:3427
+249:2:3431
+250:2:3432
+251:2:3440
+252:2:3441
+253:2:3445
+254:2:3446
+255:2:3440
+256:2:3441
+257:2:3445
+258:2:3446
+259:2:3454
+260:2:3459
+261:2:3466
+262:2:3467
+263:2:3474
+264:2:3479
+265:2:3486
+266:2:3487
+267:2:3486
+268:2:3487
+269:2:3494
+270:2:3503
+271:0:4029
+272:2:2767
+273:0:4029
+274:1:2
+275:0:4029
+276:1:8
+277:0:4029
+278:1:9
+279:0:4029
+280:1:10
+281:0:4029
+282:1:11
+283:0:4029
+284:1:12
+285:1:13
+286:1:17
+287:1:18
+288:1:26
+289:1:27
+290:1:31
+291:1:32
+292:1:40
+293:1:45
+294:1:49
+295:1:50
+296:1:58
+297:1:59
+298:1:63
+299:1:64
+300:1:58
+301:1:59
+302:1:63
+303:1:64
+304:1:72
+305:1:77
+306:1:84
+307:1:85
+308:1:92
+309:1:97
+310:1:104
+311:1:105
+312:1:104
+313:1:105
+314:1:112
+315:0:4029
+316:1:11
+317:0:4029
+318:1:123
+319:1:124
+320:0:4029
+321:1:11
+322:0:4029
+323:1:130
+324:1:131
+325:1:135
+326:1:136
+327:1:144
+328:1:145
+329:1:149
+330:1:150
+331:1:158
+332:1:163
+333:1:167
+334:1:168
+335:1:176
+336:1:177
+337:1:181
+338:1:182
+339:1:176
+340:1:177
+341:1:181
+342:1:182
+343:1:190
+344:1:195
+345:1:202
+346:1:203
+347:1:210
+348:1:215
+349:1:222
+350:1:223
+351:1:222
+352:1:223
+353:1:230
+354:0:4029
+355:1:11
+356:0:4029
+357:1:241
+358:1:242
+359:1:246
+360:1:247
+361:1:255
+362:1:256
+363:1:260
+364:1:261
+365:1:269
+366:1:274
+367:1:278
+368:1:279
+369:1:287
+370:1:288
+371:1:292
+372:1:293
+373:1:287
+374:1:288
+375:1:292
+376:1:293
+377:1:301
+378:1:306
+379:1:313
+380:1:314
+381:1:321
+382:1:326
+383:1:333
+384:1:334
+385:1:333
+386:1:334
+387:1:341
+388:1:350
+389:0:4029
+390:1:11
+391:0:4029
+392:1:468
+393:1:472
+394:1:473
+395:1:477
+396:1:478
+397:1:486
+398:1:494
+399:1:495
+400:1:499
+401:1:503
+402:1:504
+403:1:499
+404:1:503
+405:1:504
+406:1:508
+407:1:515
+408:1:522
+409:1:523
+410:1:530
+411:1:535
+412:1:542
+413:1:543
+414:1:542
+415:1:543
+416:1:550
+417:0:4029
+418:1:11
+419:0:4029
+420:2:3507
+421:2:3516
+422:2:3517
+423:2:3521
+424:2:3522
+425:2:3526
+426:2:3527
+427:2:3535
+428:2:3540
+429:2:3544
+430:2:3545
+431:2:3553
+432:2:3554
+433:2:3558
+434:2:3559
+435:2:3553
+436:2:3554
+437:2:3558
+438:2:3559
+439:2:3567
+440:2:3574
+441:2:3575
+442:2:3579
+443:2:3580
+444:2:3587
+445:2:3592
+446:2:3599
+447:2:3600
+448:2:3599
+449:2:3600
+450:2:3607
+451:2:3617
+452:0:4029
+453:2:2767
+454:0:4029
+455:2:3623
+456:2:3632
+457:2:3633
+458:2:3637
+459:2:3638
+460:2:3642
+461:2:3643
+462:2:3651
+463:2:3656
+464:2:3660
+465:2:3661
+466:2:3669
+467:2:3670
+468:2:3674
+469:2:3675
+470:2:3669
+471:2:3670
+472:2:3674
+473:2:3675
+474:2:3683
+475:2:3690
+476:2:3691
+477:2:3695
+478:2:3696
+479:2:3703
+480:2:3708
+481:2:3715
+482:2:3716
+483:2:3715
+484:2:3716
+485:2:3723
+486:0:4029
+487:2:2767
+488:0:4029
+489:1:560
+490:1:561
+491:1:565
+492:1:566
+493:1:574
+494:1:575
+495:1:579
+496:1:580
+497:1:588
+498:1:593
+499:1:597
+500:1:598
+501:1:606
+502:1:607
+503:1:611
+504:1:612
+505:1:606
+506:1:607
+507:1:611
+508:1:612
+509:1:620
+510:1:625
+511:1:632
+512:1:633
+513:1:640
+514:1:645
+515:1:652
+516:1:653
+517:1:652
+518:1:653
+519:1:660
+520:0:4029
+521:1:11
+522:0:4029
+523:2:3507
+524:2:3516
+525:2:3517
+526:2:3521
+527:2:3522
+528:2:3526
+529:2:3527
+530:2:3535
+531:2:3540
+532:2:3544
+533:2:3545
+534:2:3553
+535:2:3554
+536:2:3558
+537:2:3559
+538:2:3553
+539:2:3554
+540:2:3558
+541:2:3559
+542:2:3567
+543:2:3574
+544:2:3575
+545:2:3579
+546:2:3580
+547:2:3587
+548:2:3592
+549:2:3599
+550:2:3600
+551:2:3599
+552:2:3600
+553:2:3607
+554:2:3617
+555:0:4029
+556:2:2767
+557:0:4029
+558:2:3623
+559:2:3632
+560:2:3633
+561:2:3637
+562:2:3638
+563:2:3642
+564:2:3643
+565:2:3651
+566:2:3656
+567:2:3660
+568:2:3661
+569:2:3669
+570:2:3670
+571:2:3674
+572:2:3675
+573:2:3669
+574:2:3670
+575:2:3674
+576:2:3675
+577:2:3683
+578:2:3690
+579:2:3691
+580:2:3695
+581:2:3696
+582:2:3703
+583:2:3708
+584:2:3715
+585:2:3716
+586:2:3715
+587:2:3716
+588:2:3723
+589:0:4029
+590:2:2767
+591:0:4029
+592:1:671
+593:1:674
+594:1:675
+595:0:4029
+596:1:11
+597:0:4029
+598:2:3507
+599:2:3516
+600:2:3517
+601:2:3521
+602:2:3522
+603:2:3526
+604:2:3527
+605:2:3535
+606:2:3540
+607:2:3544
+608:2:3545
+609:2:3553
+610:2:3554
+611:2:3558
+612:2:3559
+613:2:3553
+614:2:3554
+615:2:3558
+616:2:3559
+617:2:3567
+618:2:3574
+619:2:3575
+620:2:3579
+621:2:3580
+622:2:3587
+623:2:3592
+624:2:3599
+625:2:3600
+626:2:3599
+627:2:3600
+628:2:3607
+629:2:3617
+630:0:4029
+631:2:2767
+632:0:4029
+633:2:3623
+634:2:3632
+635:2:3633
+636:2:3637
+637:2:3638
+638:2:3642
+639:2:3643
+640:2:3651
+641:2:3656
+642:2:3660
+643:2:3661
+644:2:3669
+645:2:3670
+646:2:3674
+647:2:3675
+648:2:3669
+649:2:3670
+650:2:3674
+651:2:3675
+652:2:3683
+653:2:3690
+654:2:3691
+655:2:3695
+656:2:3696
+657:2:3703
+658:2:3708
+659:2:3715
+660:2:3716
+661:2:3715
+662:2:3716
+663:2:3723
+664:0:4029
+665:2:2767
+666:0:4029
+667:1:678
+668:1:679
+669:1:683
+670:1:684
+671:1:692
+672:1:693
+673:1:697
+674:1:698
+675:1:706
+676:1:711
+677:1:715
+678:1:716
+679:1:724
+680:1:725
+681:1:729
+682:1:730
+683:1:724
+684:1:725
+685:1:729
+686:1:730
+687:1:738
+688:1:743
+689:1:750
+690:1:751
+691:1:758
+692:1:763
+693:1:770
+694:1:771
+695:1:770
+696:1:771
+697:1:778
+698:0:4029
+699:1:11
+700:0:4029
+701:2:3507
+702:2:3516
+703:2:3517
+704:2:3521
+705:2:3522
+706:2:3526
+707:2:3527
+708:2:3535
+709:2:3540
+710:2:3544
+711:2:3545
+712:2:3553
+713:2:3554
+714:2:3558
+715:2:3559
+716:2:3553
+717:2:3554
+718:2:3558
+719:2:3559
+720:2:3567
+721:2:3574
+722:2:3575
+723:2:3579
+724:2:3580
+725:2:3587
+726:2:3592
+727:2:3599
+728:2:3600
+729:2:3599
+730:2:3600
+731:2:3607
+732:2:3617
+733:0:4029
+734:2:2767
+735:0:4029
+736:2:3623
+737:2:3632
+738:2:3633
+739:2:3637
+740:2:3638
+741:2:3642
+742:2:3643
+743:2:3651
+744:2:3656
+745:2:3660
+746:2:3661
+747:2:3669
+748:2:3670
+749:2:3674
+750:2:3675
+751:2:3669
+752:2:3670
+753:2:3674
+754:2:3675
+755:2:3683
+756:2:3690
+757:2:3691
+758:2:3695
+759:2:3696
+760:2:3703
+761:2:3708
+762:2:3715
+763:2:3716
+764:2:3715
+765:2:3716
+766:2:3723
+767:0:4029
+768:2:2767
+769:0:4029
+770:1:902
+771:1:903
+772:1:907
+773:1:908
+774:1:916
+775:1:917
+776:1:921
+777:1:922
+778:1:930
+779:1:935
+780:1:939
+781:1:940
+782:1:948
+783:1:949
+784:1:953
+785:1:954
+786:1:948
+787:1:949
+788:1:953
+789:1:954
+790:1:962
+791:1:967
+792:1:974
+793:1:975
+794:1:982
+795:1:987
+796:1:994
+797:1:995
+798:1:994
+799:1:995
+800:1:1002
+801:1:1011
+802:1:1015
+803:0:4029
+804:1:11
+805:0:4029
+806:2:3507
+807:2:3516
+808:2:3517
+809:2:3521
+810:2:3522
+811:2:3526
+812:2:3527
+813:2:3535
+814:2:3540
+815:2:3544
+816:2:3545
+817:2:3553
+818:2:3554
+819:2:3558
+820:2:3559
+821:2:3553
+822:2:3554
+823:2:3558
+824:2:3559
+825:2:3567
+826:2:3574
+827:2:3575
+828:2:3579
+829:2:3580
+830:2:3587
+831:2:3592
+832:2:3599
+833:2:3600
+834:2:3599
+835:2:3600
+836:2:3607
+837:2:3617
+838:0:4029
+839:2:2767
+840:0:4029
+841:2:3623
+842:2:3632
+843:2:3633
+844:2:3637
+845:2:3638
+846:2:3642
+847:2:3643
+848:2:3651
+849:2:3656
+850:2:3660
+851:2:3661
+852:2:3669
+853:2:3670
+854:2:3674
+855:2:3675
+856:2:3669
+857:2:3670
+858:2:3674
+859:2:3675
+860:2:3683
+861:2:3690
+862:2:3691
+863:2:3695
+864:2:3696
+865:2:3703
+866:2:3708
+867:2:3715
+868:2:3716
+869:2:3715
+870:2:3716
+871:2:3723
+872:0:4029
+873:2:2767
+874:0:4029
+875:1:1016
+876:1:1017
+877:1:1021
+878:1:1022
+879:1:1030
+880:1:1031
+881:1:1032
+882:1:1044
+883:1:1049
+884:1:1053
+885:1:1054
+886:1:1062
+887:1:1063
+888:1:1067
+889:1:1068
+890:1:1062
+891:1:1063
+892:1:1067
+893:1:1068
+894:1:1076
+895:1:1081
+896:1:1088
+897:1:1089
+898:1:1096
+899:1:1101
+900:1:1108
+901:1:1109
+902:1:1108
+903:1:1109
+904:1:1116
+905:0:4029
+906:1:11
+907:0:4029
+908:2:3507
+909:2:3516
+910:2:3517
+911:2:3521
+912:2:3522
+913:2:3526
+914:2:3527
+915:2:3535
+916:2:3540
+917:2:3544
+918:2:3545
+919:2:3553
+920:2:3554
+921:2:3558
+922:2:3559
+923:2:3553
+924:2:3554
+925:2:3558
+926:2:3559
+927:2:3567
+928:2:3574
+929:2:3575
+930:2:3579
+931:2:3580
+932:2:3587
+933:2:3592
+934:2:3599
+935:2:3600
+936:2:3599
+937:2:3600
+938:2:3607
+939:2:3617
+940:0:4029
+941:2:2767
+942:0:4029
+943:2:3623
+944:2:3632
+945:2:3633
+946:2:3637
+947:2:3638
+948:2:3642
+949:2:3643
+950:2:3651
+951:2:3656
+952:2:3660
+953:2:3661
+954:2:3669
+955:2:3670
+956:2:3674
+957:2:3675
+958:2:3669
+959:2:3670
+960:2:3674
+961:2:3675
+962:2:3683
+963:2:3690
+964:2:3691
+965:2:3695
+966:2:3696
+967:2:3703
+968:2:3708
+969:2:3715
+970:2:3716
+971:2:3715
+972:2:3716
+973:2:3723
+974:0:4029
+975:2:2767
+976:0:4029
+977:1:1127
+978:0:4029
+979:2:3507
+980:2:3516
+981:2:3517
+982:2:3521
+983:2:3522
+984:2:3526
+985:2:3527
+986:2:3535
+987:2:3540
+988:2:3544
+989:2:3545
+990:2:3553
+991:2:3554
+992:2:3558
+993:2:3559
+994:2:3553
+995:2:3554
+996:2:3558
+997:2:3559
+998:2:3567
+999:2:3574
+1000:2:3575
+1001:2:3579
+1002:2:3580
+1003:2:3587
+1004:2:3592
+1005:2:3599
+1006:2:3600
+1007:2:3599
+1008:2:3600
+1009:2:3607
+1010:2:3617
+1011:0:4029
+1012:2:2767
+1013:0:4029
+1014:2:3623
+1015:2:3632
+1016:2:3633
+1017:2:3637
+1018:2:3638
+1019:2:3642
+1020:2:3643
+1021:2:3651
+1022:2:3656
+1023:2:3660
+1024:2:3661
+1025:2:3669
+1026:2:3670
+1027:2:3674
+1028:2:3675
+1029:2:3669
+1030:2:3670
+1031:2:3674
+1032:2:3675
+1033:2:3683
+1034:2:3690
+1035:2:3691
+1036:2:3695
+1037:2:3696
+1038:2:3703
+1039:2:3708
+1040:2:3715
+1041:2:3716
+1042:2:3715
+1043:2:3716
+1044:2:3723
+1045:0:4029
+1046:2:2767
+1047:0:4029
+1048:1:2663
+1049:1:2670
+1050:1:2671
+1051:1:2678
+1052:1:2683
+1053:1:2690
+1054:1:2691
+1055:1:2690
+1056:1:2691
+1057:1:2698
+1058:1:2702
+1059:0:4029
+1060:2:3507
+1061:2:3516
+1062:2:3517
+1063:2:3521
+1064:2:3522
+1065:2:3526
+1066:2:3527
+1067:2:3535
+1068:2:3540
+1069:2:3544
+1070:2:3545
+1071:2:3553
+1072:2:3554
+1073:2:3558
+1074:2:3559
+1075:2:3553
+1076:2:3554
+1077:2:3558
+1078:2:3559
+1079:2:3567
+1080:2:3574
+1081:2:3575
+1082:2:3579
+1083:2:3580
+1084:2:3587
+1085:2:3592
+1086:2:3599
+1087:2:3600
+1088:2:3599
+1089:2:3600
+1090:2:3607
+1091:2:3617
+1092:0:4029
+1093:2:2767
+1094:0:4029
+1095:2:3623
+1096:2:3632
+1097:2:3633
+1098:2:3637
+1099:2:3638
+1100:2:3642
+1101:2:3643
+1102:2:3651
+1103:2:3656
+1104:2:3660
+1105:2:3661
+1106:2:3669
+1107:2:3670
+1108:2:3674
+1109:2:3675
+1110:2:3669
+1111:2:3670
+1112:2:3674
+1113:2:3675
+1114:2:3683
+1115:2:3690
+1116:2:3691
+1117:2:3695
+1118:2:3696
+1119:2:3703
+1120:2:3708
+1121:2:3715
+1122:2:3716
+1123:2:3715
+1124:2:3716
+1125:2:3723
+1126:0:4029
+1127:2:2767
+1128:0:4029
+1129:1:1129
+1130:1:1130
+1131:0:4029
+1132:1:11
+1133:0:4029
+1134:2:3507
+1135:2:3516
+1136:2:3517
+1137:2:3521
+1138:2:3522
+1139:2:3526
+1140:2:3527
+1141:2:3535
+1142:2:3540
+1143:2:3544
+1144:2:3545
+1145:2:3553
+1146:2:3554
+1147:2:3558
+1148:2:3559
+1149:2:3553
+1150:2:3554
+1151:2:3558
+1152:2:3559
+1153:2:3567
+1154:2:3574
+1155:2:3575
+1156:2:3579
+1157:2:3580
+1158:2:3587
+1159:2:3592
+1160:2:3599
+1161:2:3600
+1162:2:3599
+1163:2:3600
+1164:2:3607
+1165:2:3617
+1166:0:4029
+1167:2:2767
+1168:0:4029
+1169:2:3623
+1170:2:3632
+1171:2:3633
+1172:2:3637
+1173:2:3638
+1174:2:3642
+1175:2:3643
+1176:2:3651
+1177:2:3656
+1178:2:3660
+1179:2:3661
+1180:2:3669
+1181:2:3670
+1182:2:3674
+1183:2:3675
+1184:2:3669
+1185:2:3670
+1186:2:3674
+1187:2:3675
+1188:2:3683
+1189:2:3690
+1190:2:3691
+1191:2:3695
+1192:2:3696
+1193:2:3703
+1194:2:3708
+1195:2:3715
+1196:2:3716
+1197:2:3715
+1198:2:3716
+1199:2:3723
+1200:0:4029
+1201:2:2767
+1202:0:4029
+1203:1:1131
+1204:1:1132
+1205:1:1136
+1206:1:1137
+1207:1:1145
+1208:1:1146
+1209:1:1150
+1210:1:1151
+1211:1:1159
+1212:1:1164
+1213:1:1168
+1214:1:1169
+1215:1:1177
+1216:1:1178
+1217:1:1182
+1218:1:1183
+1219:1:1177
+1220:1:1178
+1221:1:1182
+1222:1:1183
+1223:1:1191
+1224:1:1196
+1225:1:1203
+1226:1:1204
+1227:1:1211
+1228:1:1216
+1229:1:1223
+1230:1:1224
+1231:1:1223
+1232:1:1224
+1233:1:1231
+1234:0:4029
+1235:1:11
+1236:0:4029
+1237:2:3507
+1238:2:3516
+1239:2:3517
+1240:2:3521
+1241:2:3522
+1242:2:3526
+1243:2:3527
+1244:2:3535
+1245:2:3540
+1246:2:3544
+1247:2:3545
+1248:2:3553
+1249:2:3554
+1250:2:3558
+1251:2:3559
+1252:2:3553
+1253:2:3554
+1254:2:3558
+1255:2:3559
+1256:2:3567
+1257:2:3574
+1258:2:3575
+1259:2:3579
+1260:2:3580
+1261:2:3587
+1262:2:3592
+1263:2:3599
+1264:2:3600
+1265:2:3599
+1266:2:3600
+1267:2:3607
+1268:2:3617
+1269:0:4029
+1270:2:2767
+1271:0:4029
+1272:2:3623
+1273:2:3632
+1274:2:3633
+1275:2:3637
+1276:2:3638
+1277:2:3642
+1278:2:3643
+1279:2:3651
+1280:2:3656
+1281:2:3660
+1282:2:3661
+1283:2:3669
+1284:2:3670
+1285:2:3674
+1286:2:3675
+1287:2:3669
+1288:2:3670
+1289:2:3674
+1290:2:3675
+1291:2:3683
+1292:2:3690
+1293:2:3691
+1294:2:3695
+1295:2:3696
+1296:2:3703
+1297:2:3708
+1298:2:3715
+1299:2:3716
+1300:2:3715
+1301:2:3716
+1302:2:3723
+1303:0:4029
+1304:2:2767
+1305:0:4029
+1306:1:1242
+1307:1:1243
+1308:1:1247
+1309:1:1248
+1310:1:1256
+1311:1:1257
+1312:1:1261
+1313:1:1262
+1314:1:1270
+1315:1:1275
+1316:1:1279
+1317:1:1280
+1318:1:1288
+1319:1:1289
+1320:1:1293
+1321:1:1294
+1322:1:1288
+1323:1:1289
+1324:1:1293
+1325:1:1294
+1326:1:1302
+1327:1:1307
+1328:1:1314
+1329:1:1315
+1330:1:1322
+1331:1:1327
+1332:1:1334
+1333:1:1335
+1334:1:1334
+1335:1:1335
+1336:1:1342
+1337:1:1351
+1338:1:1355
+1339:0:4029
+1340:1:11
+1341:0:4029
+1342:2:3507
+1343:2:3516
+1344:2:3517
+1345:2:3521
+1346:2:3522
+1347:2:3526
+1348:2:3527
+1349:2:3535
+1350:2:3540
+1351:2:3544
+1352:2:3545
+1353:2:3553
+1354:2:3554
+1355:2:3558
+1356:2:3559
+1357:2:3553
+1358:2:3554
+1359:2:3558
+1360:2:3559
+1361:2:3567
+1362:2:3574
+1363:2:3575
+1364:2:3579
+1365:2:3580
+1366:2:3587
+1367:2:3592
+1368:2:3599
+1369:2:3600
+1370:2:3599
+1371:2:3600
+1372:2:3607
+1373:2:3617
+1374:0:4029
+1375:2:2767
+1376:0:4029
+1377:2:3623
+1378:2:3632
+1379:2:3633
+1380:2:3637
+1381:2:3638
+1382:2:3642
+1383:2:3643
+1384:2:3651
+1385:2:3656
+1386:2:3660
+1387:2:3661
+1388:2:3669
+1389:2:3670
+1390:2:3674
+1391:2:3675
+1392:2:3669
+1393:2:3670
+1394:2:3674
+1395:2:3675
+1396:2:3683
+1397:2:3690
+1398:2:3691
+1399:2:3695
+1400:2:3696
+1401:2:3703
+1402:2:3708
+1403:2:3715
+1404:2:3716
+1405:2:3715
+1406:2:3716
+1407:2:3723
+1408:0:4029
+1409:2:2767
+1410:0:4029
+1411:1:1356
+1412:1:1360
+1413:1:1361
+1414:1:1365
+1415:1:1366
+1416:1:1374
+1417:1:1382
+1418:1:1383
+1419:1:1387
+1420:1:1391
+1421:1:1392
+1422:1:1387
+1423:1:1391
+1424:1:1392
+1425:1:1396
+1426:1:1403
+1427:1:1410
+1428:1:1411
+1429:1:1418
+1430:1:1423
+1431:1:1430
+1432:1:1431
+1433:1:1430
+1434:1:1431
+1435:1:1438
+1436:0:4029
+1437:1:11
+1438:0:4029
+1439:2:3507
+1440:2:3516
+1441:2:3517
+1442:2:3521
+1443:2:3522
+1444:2:3526
+1445:2:3527
+1446:2:3535
+1447:2:3540
+1448:2:3544
+1449:2:3545
+1450:2:3553
+1451:2:3554
+1452:2:3558
+1453:2:3559
+1454:2:3553
+1455:2:3554
+1456:2:3558
+1457:2:3559
+1458:2:3567
+1459:2:3574
+1460:2:3575
+1461:2:3579
+1462:2:3580
+1463:2:3587
+1464:2:3592
+1465:2:3599
+1466:2:3600
+1467:2:3599
+1468:2:3600
+1469:2:3607
+1470:2:3617
+1471:0:4029
+1472:2:2767
+1473:0:4029
+1474:2:3623
+1475:2:3632
+1476:2:3633
+1477:2:3637
+1478:2:3638
+1479:2:3642
+1480:2:3643
+1481:2:3651
+1482:2:3656
+1483:2:3660
+1484:2:3661
+1485:2:3669
+1486:2:3670
+1487:2:3674
+1488:2:3675
+1489:2:3669
+1490:2:3670
+1491:2:3674
+1492:2:3675
+1493:2:3683
+1494:2:3690
+1495:2:3691
+1496:2:3695
+1497:2:3696
+1498:2:3703
+1499:2:3708
+1500:2:3715
+1501:2:3716
+1502:2:3715
+1503:2:3716
+1504:2:3723
+1505:0:4029
+1506:2:2767
+1507:0:4029
+1508:1:1448
+1509:1:1449
+1510:1:1453
+1511:1:1454
+1512:1:1462
+1513:1:1463
+1514:1:1467
+1515:1:1468
+1516:1:1476
+1517:1:1481
+1518:1:1485
+1519:1:1486
+1520:1:1494
+1521:1:1495
+1522:1:1499
+1523:1:1500
+1524:1:1494
+1525:1:1495
+1526:1:1499
+1527:1:1500
+1528:1:1508
+1529:1:1513
+1530:1:1520
+1531:1:1521
+1532:1:1528
+1533:1:1533
+1534:1:1540
+1535:1:1541
+1536:1:1540
+1537:1:1541
+1538:1:1548
+1539:0:4029
+1540:1:11
+1541:0:4029
+1542:2:3507
+1543:2:3516
+1544:2:3517
+1545:2:3521
+1546:2:3522
+1547:2:3526
+1548:2:3527
+1549:2:3535
+1550:2:3540
+1551:2:3544
+1552:2:3545
+1553:2:3553
+1554:2:3554
+1555:2:3558
+1556:2:3559
+1557:2:3553
+1558:2:3554
+1559:2:3558
+1560:2:3559
+1561:2:3567
+1562:2:3574
+1563:2:3575
+1564:2:3579
+1565:2:3580
+1566:2:3587
+1567:2:3592
+1568:2:3599
+1569:2:3600
+1570:2:3599
+1571:2:3600
+1572:2:3607
+1573:2:3617
+1574:0:4029
+1575:2:2767
+1576:0:4029
+1577:2:3623
+1578:2:3632
+1579:2:3633
+1580:2:3637
+1581:2:3638
+1582:2:3642
+1583:2:3643
+1584:2:3651
+1585:2:3656
+1586:2:3660
+1587:2:3661
+1588:2:3669
+1589:2:3670
+1590:2:3674
+1591:2:3675
+1592:2:3669
+1593:2:3670
+1594:2:3674
+1595:2:3675
+1596:2:3683
+1597:2:3690
+1598:2:3691
+1599:2:3695
+1600:2:3696
+1601:2:3703
+1602:2:3708
+1603:2:3715
+1604:2:3716
+1605:2:3715
+1606:2:3716
+1607:2:3723
+1608:0:4029
+1609:2:2767
+1610:0:4029
+1611:1:1559
+1612:1:1560
+1613:1:1564
+1614:1:1565
+1615:1:1573
+1616:1:1574
+1617:1:1578
+1618:1:1579
+1619:1:1587
+1620:1:1592
+1621:1:1596
+1622:1:1597
+1623:1:1605
+1624:1:1606
+1625:1:1610
+1626:1:1611
+1627:1:1605
+1628:1:1606
+1629:1:1610
+1630:1:1611
+1631:1:1619
+1632:1:1624
+1633:1:1631
+1634:1:1632
+1635:1:1639
+1636:1:1644
+1637:1:1651
+1638:1:1652
+1639:1:1651
+1640:1:1652
+1641:1:1659
+1642:1:1668
+1643:1:1672
+1644:0:4029
+1645:1:11
+1646:0:4029
+1647:2:3507
+1648:2:3516
+1649:2:3517
+1650:2:3521
+1651:2:3522
+1652:2:3526
+1653:2:3527
+1654:2:3535
+1655:2:3540
+1656:2:3544
+1657:2:3545
+1658:2:3553
+1659:2:3554
+1660:2:3558
+1661:2:3559
+1662:2:3553
+1663:2:3554
+1664:2:3558
+1665:2:3559
+1666:2:3567
+1667:2:3574
+1668:2:3575
+1669:2:3579
+1670:2:3580
+1671:2:3587
+1672:2:3592
+1673:2:3599
+1674:2:3600
+1675:2:3599
+1676:2:3600
+1677:2:3607
+1678:2:3617
+1679:0:4029
+1680:2:2767
+1681:0:4029
+1682:2:3623
+1683:2:3632
+1684:2:3633
+1685:2:3637
+1686:2:3638
+1687:2:3642
+1688:2:3643
+1689:2:3651
+1690:2:3656
+1691:2:3660
+1692:2:3661
+1693:2:3669
+1694:2:3670
+1695:2:3674
+1696:2:3675
+1697:2:3669
+1698:2:3670
+1699:2:3674
+1700:2:3675
+1701:2:3683
+1702:2:3690
+1703:2:3691
+1704:2:3695
+1705:2:3696
+1706:2:3703
+1707:2:3708
+1708:2:3715
+1709:2:3716
+1710:2:3715
+1711:2:3716
+1712:2:3723
+1713:0:4029
+1714:2:2767
+1715:0:4029
+1716:1:1673
+1717:1:1674
+1718:1:1678
+1719:1:1679
+1720:1:1687
+1721:1:1688
+1722:1:1689
+1723:1:1701
+1724:1:1706
+1725:1:1710
+1726:1:1711
+1727:1:1719
+1728:1:1720
+1729:1:1724
+1730:1:1725
+1731:1:1719
+1732:1:1720
+1733:1:1724
+1734:1:1725
+1735:1:1733
+1736:1:1738
+1737:1:1745
+1738:1:1746
+1739:1:1753
+1740:1:1758
+1741:1:1765
+1742:1:1766
+1743:1:1765
+1744:1:1766
+1745:1:1773
+1746:0:4029
+1747:1:11
+1748:0:4029
+1749:1:1784
+1750:1:1785
+1751:0:4029
+1752:1:11
+1753:0:4029
+1754:1:1791
+1755:1:1792
+1756:1:1796
+1757:1:1797
+1758:1:1805
+1759:1:1806
+1760:1:1810
+1761:1:1811
+1762:1:1819
+1763:1:1824
+1764:1:1828
+1765:1:1829
+1766:1:1837
+1767:1:1838
+1768:1:1842
+1769:1:1843
+1770:1:1837
+1771:1:1838
+1772:1:1842
+1773:1:1843
+1774:1:1851
+1775:1:1856
+1776:1:1863
+1777:1:1864
+1778:1:1871
+1779:1:1876
+1780:1:1883
+1781:1:1884
+1782:1:1883
+1783:1:1884
+1784:1:1891
+1785:0:4029
+1786:1:11
+1787:0:4029
+1788:1:1902
+1789:1:1903
+1790:1:1907
+1791:1:1908
+1792:1:1916
+1793:1:1917
+1794:1:1921
+1795:1:1922
+1796:1:1930
+1797:1:1935
+1798:1:1939
+1799:1:1940
+1800:1:1948
+1801:1:1949
+1802:1:1953
+1803:1:1954
+1804:1:1948
+1805:1:1949
+1806:1:1953
+1807:1:1954
+1808:1:1962
+1809:1:1967
+1810:1:1974
+1811:1:1975
+1812:1:1982
+1813:1:1987
+1814:1:1994
+1815:1:1995
+1816:1:1994
+1817:1:1995
+1818:1:2002
+1819:1:2011
+1820:0:4029
+1821:1:11
+1822:0:4029
+1823:1:2129
+1824:1:2133
+1825:1:2134
+1826:1:2138
+1827:1:2139
+1828:1:2147
+1829:1:2155
+1830:1:2156
+1831:1:2160
+1832:1:2164
+1833:1:2165
+1834:1:2160
+1835:1:2164
+1836:1:2165
+1837:1:2169
+1838:1:2176
+1839:1:2183
+1840:1:2184
+1841:1:2191
+1842:1:2196
+1843:1:2203
+1844:1:2204
+1845:1:2203
+1846:1:2204
+1847:1:2211
+1848:0:4029
+1849:1:11
+1850:0:4029
+1851:2:3507
+1852:2:3516
+1853:2:3517
+1854:2:3521
+1855:2:3522
+1856:2:3526
+1857:2:3527
+1858:2:3535
+1859:2:3540
+1860:2:3544
+1861:2:3545
+1862:2:3553
+1863:2:3554
+1864:2:3558
+1865:2:3559
+1866:2:3553
+1867:2:3554
+1868:2:3558
+1869:2:3559
+1870:2:3567
+1871:2:3574
+1872:2:3575
+1873:2:3579
+1874:2:3580
+1875:2:3587
+1876:2:3592
+1877:2:3599
+1878:2:3600
+1879:2:3599
+1880:2:3600
+1881:2:3607
+1882:2:3617
+1883:0:4029
+1884:2:2767
+1885:0:4029
+1886:2:3623
+1887:2:3632
+1888:2:3633
+1889:2:3637
+1890:2:3638
+1891:2:3642
+1892:2:3643
+1893:2:3651
+1894:2:3656
+1895:2:3660
+1896:2:3661
+1897:2:3669
+1898:2:3670
+1899:2:3674
+1900:2:3675
+1901:2:3669
+1902:2:3670
+1903:2:3674
+1904:2:3675
+1905:2:3683
+1906:2:3690
+1907:2:3691
+1908:2:3695
+1909:2:3696
+1910:2:3703
+1911:2:3708
+1912:2:3715
+1913:2:3716
+1914:2:3715
+1915:2:3716
+1916:2:3723
+1917:0:4029
+1918:2:2767
+1919:0:4029
+1920:1:2221
+1921:1:2222
+1922:1:2226
+1923:1:2227
+1924:1:2235
+1925:1:2236
+1926:1:2240
+1927:1:2241
+1928:1:2249
+1929:1:2254
+1930:1:2258
+1931:1:2259
+1932:1:2267
+1933:1:2268
+1934:1:2272
+1935:1:2273
+1936:1:2267
+1937:1:2268
+1938:1:2272
+1939:1:2273
+1940:1:2281
+1941:1:2286
+1942:1:2293
+1943:1:2294
+1944:1:2301
+1945:1:2306
+1946:1:2313
+1947:1:2314
+1948:1:2313
+1949:1:2314
+1950:1:2321
+-1:-1:-1
+1951:0:4029
+1952:1:11
+1953:0:4029
+1954:2:3507
+1955:2:3516
+1956:2:3517
+1957:2:3521
+1958:2:3522
+1959:2:3526
+1960:2:3527
+1961:2:3535
+1962:2:3540
+1963:2:3544
+1964:2:3545
+1965:2:3553
+1966:2:3554
+1967:2:3558
+1968:2:3559
+1969:2:3553
+1970:2:3554
+1971:2:3558
+1972:2:3559
+1973:2:3567
+1974:2:3574
+1975:2:3575
+1976:2:3579
+1977:2:3580
+1978:2:3587
+1979:2:3592
+1980:2:3599
+1981:2:3600
+1982:2:3599
+1983:2:3600
+1984:2:3607
+1985:2:3617
+1986:0:4029
+1987:2:2767
+1988:0:4029
+1989:2:3623
+1990:2:3632
+1991:2:3633
+1992:2:3637
+1993:2:3638
+1994:2:3642
+1995:2:3643
+1996:2:3651
+1997:2:3656
+1998:2:3660
+1999:2:3661
+2000:2:3669
+2001:2:3670
+2002:2:3674
+2003:2:3675
+2004:2:3669
+2005:2:3670
+2006:2:3674
+2007:2:3675
+2008:2:3683
+2009:2:3690
+2010:2:3691
+2011:2:3695
+2012:2:3696
+2013:2:3703
+2014:2:3708
+2015:2:3715
+2016:2:3716
+2017:2:3715
+2018:2:3716
+2019:2:3723
+2020:0:4029
+2021:2:2767
+2022:0:4029
+2023:1:2332
+2024:0:4029
+2025:2:3507
+2026:2:3516
+2027:2:3517
+2028:2:3521
+2029:2:3522
+2030:2:3526
+2031:2:3527
+2032:2:3535
+2033:2:3540
+2034:2:3544
+2035:2:3545
+2036:2:3553
+2037:2:3554
+2038:2:3558
+2039:2:3559
+2040:2:3553
+2041:2:3554
+2042:2:3558
+2043:2:3559
+2044:2:3567
+2045:2:3574
+2046:2:3575
+2047:2:3579
+2048:2:3580
+2049:2:3587
+2050:2:3592
+2051:2:3599
+2052:2:3600
+2053:2:3599
+2054:2:3600
+2055:2:3607
+2056:2:3617
+2057:0:4029
+2058:2:2767
+2059:0:4029
+2060:2:3623
+2061:2:3632
+2062:2:3633
+2063:2:3637
+2064:2:3638
+2065:2:3642
+2066:2:3643
+2067:2:3651
+2068:2:3656
+2069:2:3660
+2070:2:3661
+2071:2:3669
+2072:2:3670
+2073:2:3674
+2074:2:3675
+2075:2:3669
+2076:2:3670
+2077:2:3674
+2078:2:3675
+2079:2:3683
+2080:2:3690
+2081:2:3691
+2082:2:3695
+2083:2:3696
+2084:2:3703
+2085:2:3708
+2086:2:3715
+2087:2:3716
+2088:2:3715
+2089:2:3716
+2090:2:3723
+2091:0:4029
+2092:2:2767
+2093:0:4029
+2094:1:2706
+2095:1:2713
+2096:1:2714
+2097:1:2721
+2098:1:2726
+2099:1:2733
+2100:1:2734
+2101:1:2733
+2102:1:2734
+2103:1:2741
+2104:1:2745
+2105:0:4029
+2106:2:3507
+2107:2:3516
+2108:2:3517
+2109:2:3521
+2110:2:3522
+2111:2:3526
+2112:2:3527
+2113:2:3535
+2114:2:3540
+2115:2:3544
+2116:2:3545
+2117:2:3553
+2118:2:3554
+2119:2:3558
+2120:2:3559
+2121:2:3553
+2122:2:3554
+2123:2:3558
+2124:2:3559
+2125:2:3567
+2126:2:3574
+2127:2:3575
+2128:2:3579
+2129:2:3580
+2130:2:3587
+2131:2:3592
+2132:2:3599
+2133:2:3600
+2134:2:3599
+2135:2:3600
+2136:2:3607
+2137:2:3617
+2138:0:4029
+2139:2:2767
+2140:0:4029
+2141:2:3623
+2142:2:3632
+2143:2:3633
+2144:2:3637
+2145:2:3638
+2146:2:3642
+2147:2:3643
+2148:2:3651
+2149:2:3656
+2150:2:3660
+2151:2:3661
+2152:2:3669
+2153:2:3670
+2154:2:3674
+2155:2:3675
+2156:2:3669
+2157:2:3670
+2158:2:3674
+2159:2:3675
+2160:2:3683
+2161:2:3690
+2162:2:3691
+2163:2:3695
+2164:2:3696
+2165:2:3703
+2166:2:3708
+2167:2:3715
+2168:2:3716
+2169:2:3715
+2170:2:3716
+2171:2:3723
+2172:0:4029
+2173:2:2767
+2174:0:4029
+2175:1:2334
+2176:1:2335
+2177:0:4029
+2178:1:11
+2179:0:4029
+2180:2:3507
+2181:2:3516
+2182:2:3517
+2183:2:3521
+2184:2:3522
+2185:2:3526
+2186:2:3527
+2187:2:3535
+2188:2:3540
+2189:2:3544
+2190:2:3545
+2191:2:3553
+2192:2:3554
+2193:2:3558
+2194:2:3559
+2195:2:3553
+2196:2:3554
+2197:2:3558
+2198:2:3559
+2199:2:3567
+2200:2:3574
+2201:2:3575
+2202:2:3579
+2203:2:3580
+2204:2:3587
+2205:2:3592
+2206:2:3599
+2207:2:3600
+2208:2:3599
+2209:2:3600
+2210:2:3607
+2211:2:3617
+2212:0:4029
+2213:2:2767
+2214:0:4029
+2215:2:3623
+2216:2:3632
+2217:2:3633
+2218:2:3637
+2219:2:3638
+2220:2:3642
+2221:2:3643
+2222:2:3651
+2223:2:3656
+2224:2:3660
+2225:2:3661
+2226:2:3669
+2227:2:3670
+2228:2:3674
+2229:2:3675
+2230:2:3669
+2231:2:3670
+2232:2:3674
+2233:2:3675
+2234:2:3683
+2235:2:3690
+2236:2:3691
+2237:2:3695
+2238:2:3696
+2239:2:3703
+2240:2:3708
+2241:2:3715
+2242:2:3716
+2243:2:3715
+2244:2:3716
+2245:2:3723
+2246:0:4029
+2247:2:2767
+2248:0:4029
+2249:1:2336
+2250:1:2340
+2251:1:2341
+2252:1:2345
+2253:1:2349
+2254:1:2350
+2255:1:2354
+2256:1:2362
+2257:1:2363
+2258:1:2367
+2259:1:2371
+2260:1:2372
+2261:1:2367
+2262:1:2371
+2263:1:2372
+2264:1:2376
+2265:1:2383
+2266:1:2390
+2267:1:2391
+2268:1:2398
+2269:1:2403
+2270:1:2410
+2271:1:2411
+2272:1:2410
+2273:1:2411
+2274:1:2418
+2275:0:4029
+2276:1:11
+2277:0:4029
+2278:2:3507
+2279:2:3516
+2280:2:3517
+2281:2:3521
+2282:2:3522
+2283:2:3526
+2284:2:3527
+2285:2:3535
+2286:2:3540
+2287:2:3544
+2288:2:3545
+2289:2:3553
+2290:2:3554
+2291:2:3558
+2292:2:3559
+2293:2:3553
+2294:2:3554
+2295:2:3558
+2296:2:3559
+2297:2:3567
+2298:2:3574
+2299:2:3575
+2300:2:3579
+2301:2:3580
+2302:2:3587
+2303:2:3592
+2304:2:3599
+2305:2:3600
+2306:2:3599
+2307:2:3600
+2308:2:3607
+2309:2:3617
+2310:0:4029
+2311:2:2767
+2312:0:4029
+2313:2:3623
+2314:2:3632
+2315:2:3633
+2316:2:3637
+2317:2:3638
+2318:2:3642
+2319:2:3643
+2320:2:3651
+2321:2:3656
+2322:2:3660
+2323:2:3661
+2324:2:3669
+2325:2:3670
+2326:2:3674
+2327:2:3675
+2328:2:3669
+2329:2:3670
+2330:2:3674
+2331:2:3675
+2332:2:3683
+2333:2:3690
+2334:2:3691
+2335:2:3695
+2336:2:3696
+2337:2:3703
+2338:2:3708
+2339:2:3715
+2340:2:3716
+2341:2:3715
+2342:2:3716
+2343:2:3723
+2344:0:4029
+2345:2:2767
+2346:0:4029
+2347:1:2428
+2348:1:2429
+2349:1:2433
+2350:1:2434
+2351:1:2442
+2352:1:2443
+2353:1:2447
+2354:1:2448
+2355:1:2456
+2356:1:2461
+2357:1:2465
+2358:1:2466
+2359:1:2474
+2360:1:2475
+2361:1:2479
+2362:1:2480
+2363:1:2474
+2364:1:2475
+2365:1:2479
+2366:1:2480
+2367:1:2488
+2368:1:2493
+2369:1:2500
+2370:1:2501
+2371:1:2508
+2372:1:2513
+2373:1:2520
+2374:1:2521
+2375:1:2520
+2376:1:2521
+2377:1:2528
+2378:0:4029
+2379:1:11
+2380:0:4029
+2381:2:3507
+2382:2:3516
+2383:2:3517
+2384:2:3521
+2385:2:3522
+2386:2:3526
+2387:2:3527
+2388:2:3535
+2389:2:3540
+2390:2:3544
+2391:2:3545
+2392:2:3553
+2393:2:3554
+2394:2:3558
+2395:2:3559
+2396:2:3553
+2397:2:3554
+2398:2:3558
+2399:2:3559
+2400:2:3567
+2401:2:3574
+2402:2:3575
+2403:2:3579
+2404:2:3580
+2405:2:3587
+2406:2:3592
+2407:2:3599
+2408:2:3600
+2409:2:3599
+2410:2:3600
+2411:2:3607
+2412:2:3617
+2413:0:4029
+2414:2:2767
+2415:0:4029
+2416:2:3623
+2417:2:3632
+2418:2:3633
+2419:2:3637
+2420:2:3638
+2421:2:3642
+2422:2:3643
+2423:2:3651
+2424:2:3656
+2425:2:3660
+2426:2:3661
+2427:2:3669
+2428:2:3670
+2429:2:3674
+2430:2:3675
+2431:2:3669
+2432:2:3670
+2433:2:3674
+2434:2:3675
+2435:2:3683
+2436:2:3690
+2437:2:3691
+2438:2:3695
+2439:2:3696
+2440:2:3703
+2441:2:3708
+2442:2:3715
+2443:2:3716
+2444:2:3715
+2445:2:3716
+2446:2:3723
+2447:0:4029
+2448:2:2767
+2449:0:4029
+2450:1:2539
+2451:1:2540
+2452:1:2544
+2453:1:2545
+2454:1:2553
+2455:1:2554
+2456:1:2558
+2457:1:2559
+2458:1:2567
+2459:1:2572
+2460:1:2576
+2461:1:2577
+2462:1:2585
+2463:1:2586
+2464:1:2590
+2465:1:2591
+2466:1:2585
+2467:1:2586
+2468:1:2590
+2469:1:2591
+2470:1:2599
+2471:1:2604
+2472:1:2611
+2473:1:2612
+2474:1:2619
+2475:1:2624
+2476:1:2631
+2477:1:2632
+2478:1:2631
+2479:1:2632
+2480:1:2639
+2481:1:2648
+2482:1:2652
+2483:0:4029
+2484:1:11
+2485:0:4029
+2486:2:3507
+2487:2:3516
+2488:2:3517
+2489:2:3521
+2490:2:3522
+2491:2:3526
+2492:2:3527
+2493:2:3535
+2494:2:3540
+2495:2:3544
+2496:2:3545
+2497:2:3553
+2498:2:3554
+2499:2:3558
+2500:2:3559
+2501:2:3553
+2502:2:3554
+2503:2:3558
+2504:2:3559
+2505:2:3567
+2506:2:3574
+2507:2:3575
+2508:2:3579
+2509:2:3580
+2510:2:3587
+2511:2:3592
+2512:2:3599
+2513:2:3600
+2514:2:3599
+2515:2:3600
+2516:2:3607
+2517:2:3617
+2518:0:4029
+2519:2:2767
+2520:0:4029
+2521:2:3623
+2522:2:3632
+2523:2:3633
+2524:2:3637
+2525:2:3638
+2526:2:3642
+2527:2:3643
+2528:2:3651
+2529:2:3656
+2530:2:3660
+2531:2:3661
+2532:2:3669
+2533:2:3670
+2534:2:3674
+2535:2:3675
+2536:2:3669
+2537:2:3670
+2538:2:3674
+2539:2:3675
+2540:2:3683
+2541:2:3690
+2542:2:3691
+2543:2:3695
+2544:2:3696
+2545:2:3703
+2546:2:3708
+2547:2:3715
+2548:2:3716
+2549:2:3715
+2550:2:3716
+2551:2:3723
+2552:0:4029
+2553:2:2767
+2554:0:4029
+2555:1:2653
+2556:0:4029
+2557:1:2661
+2558:0:4029
+2559:1:2749
+2560:0:4029
+2561:1:9
+2562:0:4029
+2563:2:3507
+2564:2:3516
+2565:2:3517
+2566:2:3521
+2567:2:3522
+2568:2:3526
+2569:2:3527
+2570:2:3535
+2571:2:3540
+2572:2:3544
+2573:2:3545
+2574:2:3553
+2575:2:3554
+2576:2:3558
+2577:2:3559
+2578:2:3553
+2579:2:3554
+2580:2:3558
+2581:2:3559
+2582:2:3567
+2583:2:3574
+2584:2:3575
+2585:2:3579
+2586:2:3580
+2587:2:3587
+2588:2:3592
+2589:2:3599
+2590:2:3600
+2591:2:3599
+2592:2:3600
+2593:2:3607
+2594:2:3617
+2595:0:4029
+2596:2:2767
+2597:0:4029
+2598:2:3623
+2599:2:3632
+2600:2:3633
+2601:2:3637
+2602:2:3638
+2603:2:3642
+2604:2:3643
+2605:2:3651
+2606:2:3656
+2607:2:3660
+2608:2:3661
+2609:2:3669
+2610:2:3670
+2611:2:3674
+2612:2:3675
+2613:2:3669
+2614:2:3670
+2615:2:3674
+2616:2:3675
+2617:2:3683
+2618:2:3690
+2619:2:3691
+2620:2:3695
+2621:2:3696
+2622:2:3703
+2623:2:3708
+2624:2:3715
+2625:2:3716
+2626:2:3715
+2627:2:3716
+2628:2:3723
+2629:0:4029
+2630:2:2767
+2631:0:4029
+2632:1:10
+2633:0:4029
+2634:1:11
+2635:0:4029
+2636:2:3507
+2637:2:3516
+2638:2:3517
+2639:2:3521
+2640:2:3522
+2641:2:3526
+2642:2:3527
+2643:2:3535
+2644:2:3540
+2645:2:3544
+2646:2:3545
+2647:2:3553
+2648:2:3554
+2649:2:3558
+2650:2:3559
+2651:2:3553
+2652:2:3554
+2653:2:3558
+2654:2:3559
+2655:2:3567
+2656:2:3574
+2657:2:3575
+2658:2:3579
+2659:2:3580
+2660:2:3587
+2661:2:3592
+2662:2:3599
+2663:2:3600
+2664:2:3599
+2665:2:3600
+2666:2:3607
+2667:2:3617
+2668:0:4029
+2669:2:2767
+2670:0:4029
+2671:2:3623
+2672:2:3632
+2673:2:3633
+2674:2:3637
+2675:2:3638
+2676:2:3642
+2677:2:3643
+2678:2:3651
+2679:2:3656
+2680:2:3660
+2681:2:3661
+2682:2:3669
+2683:2:3670
+2684:2:3674
+2685:2:3675
+2686:2:3669
+2687:2:3670
+2688:2:3674
+2689:2:3675
+2690:2:3683
+2691:2:3690
+2692:2:3691
+2693:2:3695
+2694:2:3696
+2695:2:3703
+2696:2:3708
+2697:2:3715
+2698:2:3716
+2699:2:3715
+2700:2:3716
+2701:2:3723
+2702:0:4029
+2703:2:2767
+2704:0:4029
+2705:1:12
+2706:1:13
+2707:1:17
+2708:1:18
+2709:1:26
+2710:1:27
+2711:1:28
+2712:1:40
+2713:1:45
+2714:1:49
+2715:1:50
+2716:1:58
+2717:1:59
+2718:1:63
+2719:1:64
+2720:1:58
+2721:1:59
+2722:1:63
+2723:1:64
+2724:1:72
+2725:1:77
+2726:1:84
+2727:1:85
+2728:1:92
+2729:1:97
+2730:1:104
+2731:1:105
+2732:1:104
+2733:1:105
+2734:1:112
+2735:0:4029
+2736:1:11
+2737:0:4029
+2738:1:123
+2739:1:124
+2740:0:4029
+2741:1:11
+2742:0:4029
+2743:1:130
+2744:1:131
+2745:1:135
+2746:1:136
+2747:1:144
+2748:1:145
+2749:1:149
+2750:1:150
+2751:1:158
+2752:1:163
+2753:1:167
+2754:1:168
+2755:1:176
+2756:1:177
+2757:1:181
+2758:1:182
+2759:1:176
+2760:1:177
+2761:1:181
+2762:1:182
+2763:1:190
+2764:1:195
+2765:1:202
+2766:1:203
+2767:1:210
+2768:1:215
+2769:1:222
+2770:1:223
+2771:1:222
+2772:1:223
+2773:1:230
+2774:0:4029
+2775:1:11
+2776:0:4029
+2777:1:241
+2778:1:242
+2779:1:246
+2780:1:247
+2781:1:255
+2782:1:256
+2783:1:260
+2784:1:261
+2785:1:269
+2786:1:274
+2787:1:278
+2788:1:279
+2789:1:287
+2790:1:288
+2791:1:292
+2792:1:293
+2793:1:287
+2794:1:288
+2795:1:292
+2796:1:293
+2797:1:301
+2798:1:306
+2799:1:313
+2800:1:314
+2801:1:321
+2802:1:326
+2803:1:333
+2804:1:334
+2805:1:333
+2806:1:334
+2807:1:341
+2808:1:350
+2809:0:4029
+2810:1:11
+2811:0:4029
+2812:1:468
+2813:1:472
+2814:1:473
+2815:1:477
+2816:1:478
+2817:1:486
+2818:1:494
+2819:1:495
+2820:1:499
+2821:1:503
+2822:1:504
+2823:1:499
+2824:1:503
+2825:1:504
+2826:1:508
+2827:1:515
+2828:1:522
+2829:1:523
+2830:1:530
+2831:1:535
+2832:1:542
+2833:1:543
+2834:1:542
+2835:1:543
+2836:1:550
+2837:0:4029
+2838:1:11
+2839:0:4029
+2840:2:3507
+2841:2:3516
+2842:2:3517
+2843:2:3521
+2844:2:3522
+2845:2:3526
+2846:2:3527
+2847:2:3535
+2848:2:3540
+2849:2:3544
+2850:2:3545
+2851:2:3553
+2852:2:3554
+2853:2:3558
+2854:2:3559
+2855:2:3553
+2856:2:3554
+2857:2:3558
+2858:2:3559
+2859:2:3567
+2860:2:3574
+2861:2:3575
+2862:2:3579
+2863:2:3580
+2864:2:3587
+2865:2:3592
+2866:2:3599
+2867:2:3600
+2868:2:3599
+2869:2:3600
+2870:2:3607
+2871:2:3617
+2872:0:4029
+2873:2:2767
+2874:0:4029
+2875:2:3623
+2876:2:3632
+2877:2:3633
+2878:2:3637
+2879:2:3638
+2880:2:3642
+2881:2:3643
+2882:2:3651
+2883:2:3656
+2884:2:3660
+2885:2:3661
+2886:2:3669
+2887:2:3670
+2888:2:3674
+2889:2:3675
+2890:2:3669
+2891:2:3670
+2892:2:3674
+2893:2:3675
+2894:2:3683
+2895:2:3690
+2896:2:3691
+2897:2:3695
+2898:2:3696
+2899:2:3703
+2900:2:3708
+2901:2:3715
+2902:2:3716
+2903:2:3715
+2904:2:3716
+2905:2:3723
+2906:0:4029
+2907:2:2767
+2908:0:4029
+2909:1:678
+2910:1:679
+2911:1:683
+2912:1:684
+2913:1:692
+2914:1:693
+2915:1:697
+2916:1:698
+2917:1:706
+2918:1:711
+2919:1:715
+2920:1:716
+2921:1:724
+2922:1:725
+2923:1:729
+2924:1:730
+2925:1:724
+2926:1:725
+2927:1:729
+2928:1:730
+2929:1:738
+2930:1:743
+2931:1:750
+2932:1:751
+2933:1:758
+2934:1:763
+2935:1:770
+2936:1:771
+2937:1:770
+2938:1:771
+2939:1:778
+2940:0:4029
+2941:1:11
+2942:0:4029
+2943:2:3507
+2944:2:3516
+2945:2:3517
+2946:2:3521
+2947:2:3522
+2948:2:3526
+2949:2:3527
+2950:2:3535
+2951:2:3540
+2952:2:3544
+2953:2:3545
+2954:2:3553
+2955:2:3554
+2956:2:3558
+2957:2:3559
+2958:2:3553
+2959:2:3554
+2960:2:3558
+2961:2:3559
+2962:2:3567
+2963:2:3574
+2964:2:3575
+2965:2:3579
+2966:2:3580
+2967:2:3587
+2968:2:3592
+2969:2:3599
+2970:2:3600
+2971:2:3599
+2972:2:3600
+2973:2:3607
+2974:2:3617
+2975:0:4029
+2976:2:2767
+2977:0:4029
+2978:2:3623
+2979:2:3632
+2980:2:3633
+2981:2:3637
+2982:2:3638
+2983:2:3642
+2984:2:3643
+2985:2:3651
+2986:2:3656
+2987:2:3660
+2988:2:3661
+2989:2:3669
+2990:2:3670
+2991:2:3674
+2992:2:3675
+2993:2:3669
+2994:2:3670
+2995:2:3674
+2996:2:3675
+2997:2:3683
+2998:2:3690
+2999:2:3691
+3000:2:3695
+3001:2:3696
+3002:2:3703
+3003:2:3708
+3004:2:3715
+3005:2:3716
+3006:2:3715
+3007:2:3716
+3008:2:3723
+3009:0:4029
+3010:2:2767
+3011:0:4029
+3012:1:560
+3013:1:561
+3014:1:565
+3015:1:566
+3016:1:574
+3017:1:575
+3018:1:579
+3019:1:580
+3020:1:588
+3021:1:593
+3022:1:597
+3023:1:598
+3024:1:606
+3025:1:607
+3026:1:611
+3027:1:612
+3028:1:606
+3029:1:607
+3030:1:611
+3031:1:612
+3032:1:620
+3033:1:625
+3034:1:632
+3035:1:633
+3036:1:640
+3037:1:645
+3038:1:652
+3039:1:653
+3040:1:652
+3041:1:653
+3042:1:660
+3043:0:4029
+3044:1:11
+3045:0:4029
+3046:2:3507
+3047:2:3516
+3048:2:3517
+3049:2:3521
+3050:2:3522
+3051:2:3526
+3052:2:3527
+3053:2:3535
+3054:2:3540
+3055:2:3544
+3056:2:3545
+3057:2:3553
+3058:2:3554
+3059:2:3558
+3060:2:3559
+3061:2:3553
+3062:2:3554
+3063:2:3558
+3064:2:3559
+3065:2:3567
+3066:2:3574
+3067:2:3575
+3068:2:3579
+3069:2:3580
+3070:2:3587
+3071:2:3592
+3072:2:3599
+3073:2:3600
+3074:2:3599
+3075:2:3600
+3076:2:3607
+3077:2:3617
+3078:0:4029
+3079:2:2767
+3080:0:4029
+3081:2:3623
+3082:2:3632
+3083:2:3633
+3084:2:3637
+3085:2:3638
+3086:2:3642
+3087:2:3643
+3088:2:3651
+3089:2:3656
+3090:2:3660
+3091:2:3661
+3092:2:3669
+3093:2:3670
+3094:2:3674
+3095:2:3675
+3096:2:3669
+3097:2:3670
+3098:2:3674
+3099:2:3675
+3100:2:3683
+3101:2:3690
+3102:2:3691
+3103:2:3695
+3104:2:3696
+3105:2:3703
+3106:2:3708
+3107:2:3715
+3108:2:3716
+3109:2:3715
+3110:2:3716
+3111:2:3723
+3112:0:4029
+3113:2:2767
+3114:0:4029
+3115:1:1016
+3116:1:1017
+3117:1:1021
+3118:1:1022
+3119:1:1030
+3120:1:1031
+3121:1:1035
+3122:1:1036
+3123:1:1044
+3124:1:1049
+3125:1:1053
+3126:1:1054
+3127:1:1062
+3128:1:1063
+3129:1:1067
+3130:1:1068
+3131:1:1062
+3132:1:1063
+3133:1:1067
+3134:1:1068
+3135:1:1076
+3136:1:1081
+3137:1:1088
+3138:1:1089
+3139:1:1096
+3140:1:1101
+3141:1:1108
+3142:1:1109
+3143:1:1108
+3144:1:1109
+3145:1:1116
+3146:0:4029
+3147:1:11
+3148:0:4029
+3149:2:3507
+3150:2:3516
+3151:2:3517
+3152:2:3521
+3153:2:3522
+3154:2:3526
+3155:2:3527
+3156:2:3535
+3157:2:3540
+3158:2:3544
+3159:2:3545
+3160:2:3553
+3161:2:3554
+3162:2:3558
+3163:2:3559
+3164:2:3553
+3165:2:3554
+3166:2:3558
+3167:2:3559
+3168:2:3567
+3169:2:3574
+3170:2:3575
+3171:2:3579
+3172:2:3580
+3173:2:3587
+3174:2:3592
+3175:2:3599
+3176:2:3600
+3177:2:3599
+3178:2:3600
+3179:2:3607
+3180:2:3617
+3181:0:4029
+3182:2:2767
+3183:0:4029
+3184:2:3623
+3185:2:3632
+3186:2:3633
+3187:2:3637
+3188:2:3638
+3189:2:3642
+3190:2:3643
+3191:2:3651
+3192:2:3656
+3193:2:3660
+3194:2:3661
+3195:2:3669
+3196:2:3670
+3197:2:3674
+3198:2:3675
+3199:2:3669
+3200:2:3670
+3201:2:3674
+3202:2:3675
+3203:2:3683
+3204:2:3690
+3205:2:3691
+3206:2:3695
+3207:2:3696
+3208:2:3703
+3209:2:3708
+3210:2:3715
+3211:2:3716
+3212:2:3715
+3213:2:3716
+3214:2:3723
+3215:0:4029
+3216:2:2767
+3217:0:4029
+3218:1:671
+3219:1:674
+3220:1:675
+3221:0:4029
+3222:1:11
+3223:0:4029
+3224:2:3507
+3225:2:3516
+3226:2:3517
+3227:2:3521
+3228:2:3522
+3229:2:3526
+3230:2:3527
+3231:2:3535
+3232:2:3540
+3233:2:3544
+3234:2:3545
+3235:2:3553
+3236:2:3554
+3237:2:3558
+3238:2:3559
+3239:2:3553
+3240:2:3554
+3241:2:3558
+3242:2:3559
+3243:2:3567
+3244:2:3574
+3245:2:3575
+3246:2:3579
+3247:2:3580
+3248:2:3587
+3249:2:3592
+3250:2:3599
+3251:2:3600
+3252:2:3599
+3253:2:3600
+3254:2:3607
+3255:2:3617
+3256:0:4029
+3257:2:2767
+3258:0:4029
+3259:2:3623
+3260:2:3632
+3261:2:3633
+3262:2:3637
+3263:2:3638
+3264:2:3642
+3265:2:3643
+3266:2:3651
+3267:2:3656
+3268:2:3660
+3269:2:3661
+3270:2:3669
+3271:2:3670
+3272:2:3674
+3273:2:3675
+3274:2:3669
+3275:2:3670
+3276:2:3674
+3277:2:3675
+3278:2:3683
+3279:2:3690
+3280:2:3691
+3281:2:3695
+3282:2:3696
+3283:2:3703
+3284:2:3708
+3285:2:3715
+3286:2:3716
+3287:2:3715
+3288:2:3716
+3289:2:3723
+3290:0:4029
+3291:2:2767
+3292:0:4029
+3293:1:902
+3294:1:903
+3295:1:907
+3296:1:908
+3297:1:916
+3298:1:917
+3299:1:921
+3300:1:922
+3301:1:930
+3302:1:935
+3303:1:939
+3304:1:940
+3305:1:948
+3306:1:949
+3307:1:953
+3308:1:954
+3309:1:948
+3310:1:949
+3311:1:953
+3312:1:954
+3313:1:962
+3314:1:967
+3315:1:974
+3316:1:975
+3317:1:982
+3318:1:987
+3319:1:994
+3320:1:995
+3321:1:994
+3322:1:995
+3323:1:1002
+3324:1:1011
+3325:1:1015
+3326:0:4029
+3327:1:11
+3328:0:4029
+3329:2:3507
+3330:2:3516
+3331:2:3517
+3332:2:3521
+3333:2:3522
+3334:2:3526
+3335:2:3527
+3336:2:3535
+3337:2:3540
+3338:2:3544
+3339:2:3545
+3340:2:3553
+3341:2:3554
+3342:2:3558
+3343:2:3559
+3344:2:3553
+3345:2:3554
+3346:2:3558
+3347:2:3559
+3348:2:3567
+3349:2:3574
+3350:2:3575
+3351:2:3579
+3352:2:3580
+3353:2:3587
+3354:2:3592
+3355:2:3599
+3356:2:3600
+3357:2:3599
+3358:2:3600
+3359:2:3607
+3360:2:3617
+3361:0:4029
+3362:2:2767
+3363:0:4029
+3364:2:3623
+3365:2:3632
+3366:2:3633
+3367:2:3637
+3368:2:3638
+3369:2:3642
+3370:2:3643
+3371:2:3651
+3372:2:3656
+3373:2:3660
+3374:2:3661
+3375:2:3669
+3376:2:3670
+3377:2:3674
+3378:2:3675
+3379:2:3669
+3380:2:3670
+3381:2:3674
+3382:2:3675
+3383:2:3683
+3384:2:3690
+3385:2:3691
+3386:2:3695
+3387:2:3696
+3388:2:3703
+3389:2:3708
+3390:2:3715
+3391:2:3716
+3392:2:3715
+3393:2:3716
+3394:2:3723
+3395:0:4029
+3396:2:2767
+3397:0:4029
+3398:1:1127
+3399:0:4029
+3400:2:3507
+3401:2:3516
+3402:2:3517
+3403:2:3521
+3404:2:3522
+3405:2:3526
+3406:2:3527
+3407:2:3535
+3408:2:3540
+3409:2:3544
+3410:2:3545
+3411:2:3553
+3412:2:3554
+3413:2:3558
+3414:2:3559
+3415:2:3553
+3416:2:3554
+3417:2:3558
+3418:2:3559
+3419:2:3567
+3420:2:3574
+3421:2:3575
+3422:2:3579
+3423:2:3580
+3424:2:3587
+3425:2:3592
+3426:2:3599
+3427:2:3600
+3428:2:3599
+3429:2:3600
+3430:2:3607
+3431:2:3617
+3432:0:4029
+3433:2:2767
+3434:0:4029
+3435:2:3623
+3436:2:3632
+3437:2:3633
+3438:2:3637
+3439:2:3638
+3440:2:3642
+3441:2:3643
+3442:2:3651
+3443:2:3656
+3444:2:3660
+3445:2:3661
+3446:2:3669
+3447:2:3670
+3448:2:3674
+3449:2:3675
+3450:2:3669
+3451:2:3670
+3452:2:3674
+3453:2:3675
+3454:2:3683
+3455:2:3690
+3456:2:3691
+3457:2:3695
+3458:2:3696
+3459:2:3703
+3460:2:3708
+3461:2:3715
+3462:2:3716
+3463:2:3715
+3464:2:3716
+3465:2:3723
+3466:0:4029
+3467:2:2767
+3468:0:4029
+3469:1:2663
+3470:1:2670
+3471:1:2673
+3472:1:2674
+3473:1:2678
+3474:1:2683
+3475:1:2690
+3476:1:2691
+3477:1:2690
+3478:1:2691
+3479:1:2698
+3480:1:2702
+3481:0:4029
+3482:2:3507
+3483:2:3516
+3484:2:3517
+3485:2:3521
+3486:2:3522
+3487:2:3526
+3488:2:3527
+3489:2:3535
+3490:2:3540
+3491:2:3544
+3492:2:3545
+3493:2:3553
+3494:2:3554
+3495:2:3558
+3496:2:3559
+3497:2:3553
+3498:2:3554
+3499:2:3558
+3500:2:3559
+3501:2:3567
+3502:2:3574
+3503:2:3575
+3504:2:3579
+3505:2:3580
+3506:2:3587
+3507:2:3592
+3508:2:3599
+3509:2:3600
+3510:2:3599
+3511:2:3600
+3512:2:3607
+3513:2:3617
+3514:0:4029
+3515:2:2767
+3516:0:4029
+3517:2:3623
+3518:2:3632
+3519:2:3633
+3520:2:3637
+3521:2:3638
+3522:2:3642
+3523:2:3643
+3524:2:3651
+3525:2:3656
+3526:2:3660
+3527:2:3661
+3528:2:3669
+3529:2:3670
+3530:2:3674
+3531:2:3675
+3532:2:3669
+3533:2:3670
+3534:2:3674
+3535:2:3675
+3536:2:3683
+3537:2:3690
+3538:2:3691
+3539:2:3695
+3540:2:3696
+3541:2:3703
+3542:2:3708
+3543:2:3715
+3544:2:3716
+3545:2:3715
+3546:2:3716
+3547:2:3723
+3548:0:4029
+3549:2:2767
+3550:0:4029
+3551:1:1129
+3552:1:1130
+3553:0:4029
+3554:1:11
+3555:0:4029
+3556:2:3507
+3557:2:3516
+3558:2:3517
+3559:2:3521
+3560:2:3522
+3561:2:3526
+3562:2:3527
+3563:2:3535
+3564:2:3540
+3565:2:3544
+3566:2:3545
+3567:2:3553
+3568:2:3554
+3569:2:3558
+3570:2:3559
+3571:2:3553
+3572:2:3554
+3573:2:3558
+3574:2:3559
+3575:2:3567
+3576:2:3574
+3577:2:3575
+3578:2:3579
+3579:2:3580
+3580:2:3587
+3581:2:3592
+3582:2:3599
+3583:2:3600
+3584:2:3599
+3585:2:3600
+3586:2:3607
+3587:2:3617
+3588:0:4029
+3589:2:2767
+3590:0:4029
+3591:2:3623
+3592:2:3632
+3593:2:3633
+3594:2:3637
+3595:2:3638
+3596:2:3642
+3597:2:3643
+3598:2:3651
+3599:2:3656
+3600:2:3660
+3601:2:3661
+3602:2:3669
+3603:2:3670
+3604:2:3674
+3605:2:3675
+3606:2:3669
+3607:2:3670
+3608:2:3674
+3609:2:3675
+3610:2:3683
+3611:2:3690
+3612:2:3691
+3613:2:3695
+3614:2:3696
+3615:2:3703
+3616:2:3708
+3617:2:3715
+3618:2:3716
+3619:2:3715
+3620:2:3716
+3621:2:3723
+3622:0:4029
+3623:2:2767
+3624:0:4029
+3625:1:1131
+3626:1:1132
+3627:1:1136
+3628:1:1137
+3629:1:1145
+3630:1:1146
+3631:1:1147
+3632:1:1159
+3633:1:1164
+3634:1:1168
+3635:1:1169
+3636:1:1177
+3637:1:1178
+3638:1:1182
+3639:1:1183
+3640:1:1177
+3641:1:1178
+3642:1:1182
+3643:1:1183
+3644:1:1191
+3645:1:1196
+3646:1:1203
+3647:1:1204
+3648:1:1211
+3649:1:1216
+3650:1:1223
+3651:1:1224
+3652:1:1223
+3653:1:1224
+3654:1:1231
+3655:0:4029
+3656:1:11
+3657:0:4029
+3658:1:1242
+3659:1:1243
+3660:1:1247
+3661:1:1248
+3662:1:1256
+3663:1:1257
+3664:1:1261
+3665:1:1262
+3666:1:1270
+3667:1:1275
+3668:1:1279
+3669:1:1280
+3670:1:1288
+3671:1:1289
+3672:1:1293
+3673:1:1294
+3674:1:1288
+3675:1:1289
+3676:1:1293
+3677:1:1294
+3678:1:1302
+3679:1:1307
+3680:1:1314
+3681:1:1315
+3682:1:1322
+3683:1:1327
+3684:1:1334
+3685:1:1335
+3686:1:1334
+3687:1:1335
+3688:1:1342
+3689:1:1351
+3690:1:1355
+3691:0:4029
+3692:1:11
+3693:0:4029
+3694:1:1356
+3695:1:1360
+3696:1:1361
+3697:1:1365
+3698:1:1366
+3699:1:1374
+3700:1:1382
+3701:1:1383
+3702:1:1387
+3703:1:1391
+3704:1:1392
+3705:1:1387
+3706:1:1391
+3707:1:1392
+3708:1:1396
+3709:1:1403
+3710:1:1410
+3711:1:1411
+3712:1:1418
+3713:1:1423
+3714:1:1430
+3715:1:1431
+3716:1:1430
+3717:1:1431
+3718:1:1438
+3719:0:4029
+3720:1:11
+3721:0:4029
+3722:1:1448
+3723:1:1449
+3724:1:1453
+3725:1:1454
+3726:1:1462
+3727:1:1463
+3728:1:1467
+3729:1:1468
+3730:1:1476
+3731:1:1481
+3732:1:1485
+3733:1:1486
+3734:1:1494
+3735:1:1495
+3736:1:1499
+3737:1:1500
+3738:1:1494
+3739:1:1495
+3740:1:1499
+3741:1:1500
+3742:1:1508
+3743:1:1513
+3744:1:1520
+3745:1:1521
+3746:1:1528
+3747:1:1533
+3748:1:1540
+3749:1:1541
+3750:1:1540
+3751:1:1541
+3752:1:1548
+3753:0:4029
+3754:2:3507
+3755:2:3516
+3756:2:3517
+3757:2:3521
+3758:2:3522
+3759:2:3526
+3760:2:3527
+3761:2:3535
+3762:2:3540
+3763:2:3544
+3764:2:3545
+3765:2:3553
+3766:2:3554
+3767:2:3558
+3768:2:3559
+3769:2:3553
+3770:2:3554
+3771:2:3558
+3772:2:3559
+3773:2:3567
+3774:2:3574
+3775:2:3575
+3776:2:3579
+3777:2:3580
+3778:2:3587
+3779:2:3592
+3780:2:3599
+3781:2:3600
+3782:2:3599
+3783:2:3600
+3784:2:3607
+3785:2:3617
+3786:0:4029
+3787:2:2767
+3788:0:4029
+3789:1:11
+3790:0:4029
+3791:1:1559
+3792:1:1560
+3793:1:1564
+3794:1:1565
+3795:1:1573
+3796:1:1574
+3797:1:1578
+3798:1:1579
+3799:1:1587
+3800:1:1592
+3801:1:1596
+3802:1:1597
+3803:1:1605
+3804:1:1606
+3805:1:1610
+3806:1:1611
+3807:1:1605
+3808:1:1606
+3809:1:1610
+3810:1:1611
+3811:1:1619
+3812:1:1624
+3813:1:1631
+3814:1:1632
+3815:1:1639
+3816:1:1644
+3817:1:1651
+3818:1:1652
+3819:1:1651
+3820:1:1652
+3821:1:1659
+3822:1:1668
+3823:1:1672
+3824:0:4029
+3825:1:11
+3826:0:4029
+3827:1:1673
+3828:1:1674
+3829:1:1678
+3830:1:1679
+3831:1:1687
+3832:1:1688
+3833:1:1689
+3834:1:1701
+3835:1:1706
+3836:1:1710
+3837:1:1711
+3838:1:1719
+3839:1:1720
+3840:1:1724
+3841:1:1725
+3842:1:1719
+3843:1:1720
+3844:1:1724
+3845:1:1725
+3846:1:1733
+3847:1:1738
+3848:1:1745
+3849:1:1746
+3850:1:1753
+3851:1:1758
+3852:1:1765
+3853:1:1766
+3854:1:1765
+3855:1:1766
+3856:1:1773
+3857:0:4029
+3858:1:11
+3859:0:4029
+3860:2:3623
+3861:2:3632
+3862:2:3633
+3863:2:3637
+3864:2:3638
+3865:2:3642
+3866:2:3643
+3867:2:3651
+3868:2:3656
+3869:2:3660
+3870:2:3661
+3871:2:3669
+3872:2:3670
+3873:2:3674
+3874:2:3675
+3875:2:3669
+3876:2:3670
+3877:2:3674
+3878:2:3675
+3879:2:3683
+3880:2:3690
+3881:2:3691
+3882:2:3695
+3883:2:3696
+3884:2:3703
+3885:2:3708
+3886:2:3715
+3887:2:3716
+3888:2:3715
+3889:2:3716
+3890:2:3723
+3891:0:4029
+3892:2:2767
+3893:0:4029
+3894:1:1784
+3895:1:1785
+3896:0:4029
+3897:1:11
+3898:0:4029
+3899:1:1791
+3900:1:1792
+3901:1:1796
+3902:1:1797
+3903:1:1805
+3904:1:1806
+3905:1:1810
+3906:1:1811
+3907:1:1819
+3908:1:1824
+3909:1:1828
+3910:1:1829
+3911:1:1837
+3912:1:1838
+3913:1:1842
+3914:1:1843
+3915:1:1837
+3916:1:1838
+3917:1:1842
+3918:1:1843
+3919:1:1851
+3920:1:1856
+3921:1:1863
+3922:1:1864
+3923:1:1871
+3924:1:1876
+3925:1:1883
+3926:1:1884
+3927:1:1883
+3928:1:1884
+3929:1:1891
+3930:0:4029
+3931:1:11
+3932:0:4029
+3933:1:1902
+3934:1:1903
+3935:1:1907
+3936:1:1908
+3937:1:1916
+3938:1:1917
+3939:1:1921
+3940:1:1922
+3941:1:1930
+3942:1:1935
+3943:1:1939
+3944:1:1940
+3945:1:1948
+3946:1:1949
+3947:1:1953
+3948:1:1954
+3949:1:1948
+3950:1:1949
+3951:1:1953
+3952:1:1954
+3953:1:1962
+3954:1:1967
+3955:1:1974
+3956:1:1975
+3957:1:1982
+3958:1:1987
+3959:1:1994
+3960:1:1995
+3961:1:1994
+3962:1:1995
+3963:1:2002
+3964:1:2011
+3965:0:4029
+3966:1:11
+3967:0:4029
+3968:1:2129
+3969:1:2133
+3970:1:2134
+3971:1:2138
+3972:1:2139
+3973:1:2147
+3974:1:2155
+3975:1:2156
+3976:1:2160
+3977:1:2164
+3978:1:2165
+3979:1:2160
+3980:1:2164
+3981:1:2165
+3982:1:2169
+3983:1:2176
+3984:1:2183
+3985:1:2184
+3986:1:2191
+3987:1:2196
+3988:1:2203
+3989:1:2204
+3990:1:2203
+3991:1:2204
+3992:1:2211
+3993:0:4029
+3994:1:11
+3995:0:4029
+3996:1:2221
+3997:1:2222
+3998:1:2226
+3999:1:2227
+4000:1:2235
+4001:1:2236
+4002:1:2240
+4003:1:2241
+4004:1:2249
+4005:1:2254
+4006:1:2258
+4007:1:2259
+4008:1:2267
+4009:1:2268
+4010:1:2272
+4011:1:2273
+4012:1:2267
+4013:1:2268
+4014:1:2272
+4015:1:2273
+4016:1:2281
+4017:1:2286
+4018:1:2293
+4019:1:2294
+4020:1:2301
+4021:1:2306
+4022:1:2313
+4023:1:2314
+4024:1:2313
+4025:1:2314
+4026:1:2321
+4027:0:4029
+4028:1:11
+4029:0:4029
+4030:1:2332
+4031:0:4029
+4032:1:2706
+4033:1:2713
+4034:1:2714
+4035:1:2721
+4036:1:2726
+4037:1:2733
+4038:1:2734
+4039:1:2733
+4040:1:2734
+4041:1:2741
+4042:1:2745
+4043:0:4029
+4044:1:2334
+4045:1:2335
+4046:0:4029
+4047:1:11
+4048:0:4029
+4049:1:2336
+4050:1:2340
+4051:1:2341
+4052:1:2345
+4053:1:2349
+4054:1:2350
+4055:1:2354
+4056:1:2362
+4057:1:2363
+4058:1:2367
+4059:1:2371
+4060:1:2372
+4061:1:2367
+4062:1:2371
+4063:1:2372
+4064:1:2376
+4065:1:2383
+4066:1:2390
+4067:1:2391
+4068:1:2398
+4069:1:2403
+4070:1:2410
+4071:1:2411
+4072:1:2410
+4073:1:2411
+4074:1:2418
+4075:0:4029
+4076:1:11
+4077:0:4029
+4078:1:2428
+4079:1:2429
+4080:1:2433
+4081:1:2434
+4082:1:2442
+4083:1:2443
+4084:1:2447
+4085:1:2448
+4086:1:2456
+4087:1:2461
+4088:1:2465
+4089:1:2466
+4090:1:2474
+4091:1:2475
+4092:1:2479
+4093:1:2480
+4094:1:2474
+4095:1:2475
+4096:1:2479
+4097:1:2480
+4098:1:2488
+4099:1:2493
+4100:1:2500
+4101:1:2501
+4102:1:2508
+4103:1:2513
+4104:1:2520
+4105:1:2521
+4106:1:2520
+4107:1:2521
+4108:1:2528
+4109:0:4029
+4110:1:11
+4111:0:4029
+4112:1:2539
+4113:1:2540
+4114:1:2544
+4115:1:2545
+4116:1:2553
+4117:1:2554
+4118:1:2558
+4119:1:2559
+4120:1:2567
+4121:1:2572
+4122:1:2576
+4123:1:2577
+4124:1:2585
+4125:1:2586
+4126:1:2590
+4127:1:2591
+4128:1:2585
+4129:1:2586
+4130:1:2590
+4131:1:2591
+4132:1:2599
+4133:1:2604
+4134:1:2611
+4135:1:2612
+4136:1:2619
+4137:1:2624
+4138:1:2631
+4139:1:2632
+4140:1:2631
+4141:1:2632
+4142:1:2639
+4143:1:2648
+4144:1:2652
+4145:0:4029
+4146:1:11
+4147:0:4029
+4148:1:2653
+4149:0:4029
+4150:1:2661
+4151:0:4029
+4152:1:2749
+4153:0:4029
+4154:1:9
+4155:0:4029
+4156:1:10
+4157:0:4029
+4158:1:11
+4159:0:4029
+4160:1:12
+4161:1:13
+4162:1:17
+4163:1:18
+4164:1:26
+4165:1:27
+4166:1:28
+4167:1:40
+4168:1:45
+4169:1:49
+4170:1:50
+4171:1:58
+4172:1:59
+4173:1:63
+4174:1:64
+4175:1:58
+4176:1:59
+4177:1:63
+4178:1:64
+4179:1:72
+4180:1:77
+4181:1:84
+4182:1:85
+4183:1:92
+4184:1:97
+4185:1:104
+4186:1:105
+4187:1:104
+4188:1:105
+4189:1:112
+4190:0:4029
+4191:1:11
+4192:0:4029
+4193:1:123
+4194:1:124
+4195:0:4029
+4196:1:11
+4197:0:4029
+4198:1:130
+4199:1:131
+4200:1:135
+4201:1:136
+4202:1:144
+4203:1:145
+4204:1:149
+4205:1:150
+4206:1:158
+4207:1:163
+4208:1:167
+4209:1:168
+4210:1:176
+4211:1:177
+4212:1:181
+4213:1:182
+4214:1:176
+4215:1:177
+4216:1:181
+4217:1:182
+4218:1:190
+4219:1:195
+4220:1:202
+4221:1:203
+4222:1:210
+4223:1:215
+4224:1:222
+4225:1:223
+4226:1:222
+4227:1:223
+4228:1:230
+4229:0:4029
+4230:1:11
+4231:0:4029
+4232:1:241
+4233:1:242
+4234:1:246
+4235:1:247
+4236:1:255
+4237:1:256
+4238:1:260
+4239:1:261
+4240:1:269
+4241:1:274
+4242:1:278
+4243:1:279
+4244:1:287
+4245:1:288
+4246:1:292
+4247:1:293
+4248:1:287
+4249:1:288
+4250:1:292
+4251:1:293
+4252:1:301
+4253:1:306
+4254:1:313
+4255:1:314
+4256:1:321
+4257:1:326
+4258:1:333
+4259:1:334
+4260:1:333
+4261:1:334
+4262:1:341
+4263:1:350
+4264:0:4029
+4265:1:11
+4266:0:4029
+4267:1:468
+4268:1:472
+4269:1:473
+4270:1:477
+4271:1:478
+4272:1:486
+4273:1:494
+4274:1:495
+4275:1:499
+4276:1:503
+4277:1:504
+4278:1:499
+4279:1:503
+4280:1:504
+4281:1:508
+4282:1:515
+4283:1:522
+4284:1:523
+4285:1:530
+4286:1:535
+4287:1:542
+4288:1:543
+4289:1:542
+4290:1:543
+4291:1:550
+4292:0:4029
+4293:1:11
+4294:0:4029
+4295:1:560
+4296:1:561
+4297:1:565
+4298:1:566
+4299:1:574
+4300:1:575
+4301:1:579
+4302:1:580
+4303:1:588
+4304:1:593
+4305:1:597
+4306:1:598
+4307:1:606
+4308:1:607
+4309:1:611
+4310:1:612
+4311:1:606
+4312:1:607
+4313:1:611
+4314:1:612
+4315:1:620
+4316:1:625
+4317:1:632
+4318:1:633
+4319:1:640
+4320:1:645
+4321:1:652
+4322:1:653
+4323:1:652
+4324:1:653
+4325:1:660
+4326:0:4029
+4327:1:11
+4328:0:4029
+4329:1:671
+4330:1:674
+4331:1:675
+4332:0:4029
+4333:1:11
+4334:0:4029
+4335:1:678
+4336:1:679
+4337:1:683
+4338:1:684
+4339:1:692
+4340:1:693
+4341:1:697
+4342:1:698
+4343:1:706
+4344:1:711
+4345:1:715
+4346:1:716
+4347:1:724
+4348:1:725
+4349:1:729
+4350:1:730
+4351:1:724
+4352:1:725
+4353:1:729
+4354:1:730
+4355:1:738
+4356:1:743
+4357:1:750
+4358:1:751
+4359:1:758
+4360:1:763
+4361:1:770
+4362:1:771
+4363:1:770
+4364:1:771
+4365:1:778
+4366:0:4029
+4367:1:11
+4368:0:4029
+4369:1:902
+4370:1:903
+4371:1:907
+4372:1:908
+4373:1:916
+4374:1:917
+4375:1:921
+4376:1:922
+4377:1:930
+4378:1:935
+4379:1:939
+4380:1:940
+4381:1:948
+4382:1:949
+4383:1:953
+4384:1:954
+4385:1:948
+4386:1:949
+4387:1:953
+4388:1:954
+4389:1:962
+4390:1:967
+4391:1:974
+4392:1:975
+4393:1:982
+4394:1:987
+4395:1:994
+4396:1:995
+4397:1:994
+4398:1:995
+4399:1:1002
+4400:1:1011
+4401:1:1015
+4402:0:4029
+4403:1:11
+4404:0:4029
+4405:1:1016
+4406:1:1017
+4407:1:1021
+4408:1:1022
+4409:1:1030
+4410:1:1031
+4411:1:1032
+4412:1:1044
+4413:1:1049
+4414:1:1053
+4415:1:1054
+4416:1:1062
+4417:1:1063
+4418:1:1067
+4419:1:1068
+4420:1:1062
+4421:1:1063
+4422:1:1067
+4423:1:1068
+4424:1:1076
+4425:1:1081
+4426:1:1088
+4427:1:1089
+4428:1:1096
+4429:1:1101
+4430:1:1108
+4431:1:1109
+4432:1:1108
+4433:1:1109
+4434:1:1116
+4435:0:4029
+4436:1:11
+4437:0:4029
+4438:1:1127
+4439:0:4029
+4440:1:2663
+4441:1:2670
+4442:1:2671
+4443:1:2678
+4444:1:2683
+4445:1:2690
+4446:1:2691
+4447:1:2690
+4448:1:2691
+4449:1:2698
+4450:1:2702
+4451:0:4029
+4452:1:1129
+4453:1:1130
+4454:0:4029
+4455:1:11
+4456:0:4029
+4457:1:1131
+4458:1:1132
+4459:1:1136
+4460:1:1137
+4461:1:1145
+4462:1:1146
+4463:1:1150
+4464:1:1151
+4465:1:1159
+4466:1:1164
+4467:1:1168
+4468:1:1169
+4469:1:1177
+4470:1:1178
+4471:1:1182
+4472:1:1183
+4473:1:1177
+4474:1:1178
+4475:1:1182
+4476:1:1183
+4477:1:1191
+4478:1:1196
+4479:1:1203
+4480:1:1204
+4481:1:1211
+4482:1:1216
+4483:1:1223
+4484:1:1224
+4485:1:1223
+4486:1:1224
+4487:1:1231
+4488:0:4029
+4489:1:11
+4490:0:4029
+4491:1:1242
+4492:1:1243
+4493:1:1247
+4494:1:1248
+4495:1:1256
+4496:1:1257
+4497:1:1261
+4498:1:1262
+4499:1:1270
+4500:1:1275
+4501:1:1279
+4502:1:1280
+4503:1:1288
+4504:1:1289
+4505:1:1293
+4506:1:1294
+4507:1:1288
+4508:1:1289
+4509:1:1293
+4510:1:1294
+4511:1:1302
+4512:1:1307
+4513:1:1314
+4514:1:1315
+4515:1:1322
+4516:1:1327
+4517:1:1334
+4518:1:1335
+4519:1:1334
+4520:1:1335
+4521:1:1342
+4522:1:1351
+4523:1:1355
+4524:0:4029
+4525:1:11
+4526:0:4029
+4527:1:1356
+4528:1:1360
+4529:1:1361
+4530:1:1365
+4531:1:1366
+4532:1:1374
+4533:1:1382
+4534:1:1383
+4535:1:1387
+4536:1:1391
+4537:1:1392
+4538:1:1387
+4539:1:1391
+4540:1:1392
+4541:1:1396
+4542:1:1403
+4543:1:1410
+4544:1:1411
+4545:1:1418
+4546:1:1423
+4547:1:1430
+4548:1:1431
+4549:1:1430
+4550:1:1431
+4551:1:1438
+4552:0:4029
+4553:1:11
+4554:0:4029
+4555:1:1448
+4556:1:1449
+4557:1:1453
+4558:1:1454
+4559:1:1462
+4560:1:1463
+4561:1:1467
+4562:1:1468
+4563:1:1476
+4564:1:1481
+4565:1:1485
+4566:1:1486
+4567:1:1494
+4568:1:1495
+4569:1:1499
+4570:1:1500
+4571:1:1494
+4572:1:1495
+4573:1:1499
+4574:1:1500
+4575:1:1508
+4576:1:1513
+4577:1:1520
+4578:1:1521
+4579:1:1528
+4580:1:1533
+4581:1:1540
+4582:1:1541
+4583:1:1540
+4584:1:1541
+4585:1:1548
+4586:0:4029
+4587:1:11
+4588:0:4029
+4589:1:1559
+4590:1:1560
+4591:1:1564
+4592:1:1565
+4593:1:1573
+4594:1:1574
+4595:1:1578
+4596:1:1579
+4597:1:1587
+4598:1:1592
+4599:1:1596
+4600:1:1597
+4601:1:1605
+4602:1:1606
+4603:1:1610
+4604:1:1611
+4605:1:1605
+4606:1:1606
+4607:1:1610
+4608:1:1611
+4609:1:1619
+4610:1:1624
+4611:1:1631
+4612:1:1632
+4613:1:1639
+4614:1:1644
+4615:1:1651
+4616:1:1652
+4617:1:1651
+4618:1:1652
+4619:1:1659
+4620:1:1668
+4621:1:1672
+4622:0:4029
+4623:1:11
+4624:0:4029
+4625:1:1673
+4626:1:1674
+4627:1:1678
+4628:1:1679
+4629:1:1687
+4630:1:1696
+4631:1:1697
+4632:1:1701
+4633:1:1706
+4634:1:1710
+4635:1:1711
+4636:1:1719
+4637:1:1720
+4638:1:1724
+4639:1:1725
+4640:1:1719
+4641:1:1720
+4642:1:1724
+4643:1:1725
+4644:1:1733
+4645:1:1738
+4646:1:1745
+4647:1:1748
+4648:1:1749
+4649:1:1753
+4650:1:1758
+4651:1:1765
+4652:1:1766
+4653:1:1765
+4654:1:1766
+4655:1:1773
+4656:0:4029
+4657:1:11
+4658:0:4029
+4659:2:3507
+4660:2:3516
+4661:2:3517
+4662:2:3521
+4663:2:3522
+4664:2:3526
+4665:2:3527
+4666:2:3535
+4667:2:3540
+4668:2:3544
+4669:2:3545
+4670:2:3553
+4671:2:3554
+4672:2:3558
+4673:2:3559
+4674:2:3553
+4675:2:3554
+4676:2:3558
+4677:2:3559
+4678:2:3567
+4679:2:3574
+4680:2:3575
+4681:2:3579
+4682:2:3580
+4683:2:3587
+4684:2:3592
+4685:2:3599
+4686:2:3600
+4687:2:3599
+4688:2:3600
+4689:2:3607
+4690:2:3617
+4691:0:4029
+4692:2:2767
+4693:0:4029
+4694:2:3623
+4695:2:3632
+4696:2:3633
+4697:2:3637
+4698:2:3638
+4699:2:3642
+4700:2:3643
+4701:2:3651
+4702:2:3656
+4703:2:3660
+4704:2:3661
+4705:2:3669
+4706:2:3670
+4707:2:3674
+4708:2:3675
+4709:2:3669
+4710:2:3670
+4711:2:3674
+4712:2:3675
+4713:2:3683
+4714:2:3690
+4715:2:3691
+4716:2:3695
+4717:2:3696
+4718:2:3703
+4719:2:3708
+4720:2:3715
+4721:2:3716
+4722:2:3715
+4723:2:3716
+4724:2:3723
+4725:0:4029
+4726:2:2767
+4727:0:4029
+4728:1:1784
+4729:1:1785
+4730:0:4029
+4731:1:11
+4732:0:4029
+4733:2:3507
+4734:2:3516
+4735:2:3517
+4736:2:3521
+4737:2:3522
+4738:2:3526
+4739:2:3527
+4740:2:3535
+4741:2:3540
+4742:2:3544
+4743:2:3545
+4744:2:3553
+4745:2:3554
+4746:2:3558
+4747:2:3559
+4748:2:3553
+4749:2:3554
+4750:2:3558
+4751:2:3559
+4752:2:3567
+4753:2:3574
+4754:2:3575
+4755:2:3579
+4756:2:3580
+4757:2:3587
+4758:2:3592
+4759:2:3599
+4760:2:3600
+4761:2:3599
+4762:2:3600
+4763:2:3607
+4764:2:3617
+4765:0:4029
+4766:2:2767
+4767:0:4029
+4768:2:3623
+4769:2:3632
+4770:2:3633
+4771:2:3637
+4772:2:3638
+4773:2:3642
+4774:2:3643
+4775:2:3651
+4776:2:3656
+4777:2:3660
+4778:2:3661
+4779:2:3669
+4780:2:3670
+4781:2:3674
+4782:2:3675
+4783:2:3669
+4784:2:3670
+4785:2:3674
+4786:2:3675
+4787:2:3683
+4788:2:3690
+4789:2:3691
+4790:2:3695
+4791:2:3696
+4792:2:3703
+4793:2:3708
+4794:2:3715
+4795:2:3716
+4796:2:3715
+4797:2:3716
+4798:2:3723
+4799:0:4029
+4800:2:2767
+4801:0:4029
+4802:1:1791
+4803:1:1792
+4804:1:1796
+4805:1:1797
+4806:1:1805
+4807:1:1806
+4808:1:1807
+4809:1:1819
+4810:1:1824
+4811:1:1828
+4812:1:1829
+4813:1:1837
+4814:1:1838
+4815:1:1842
+4816:1:1843
+4817:1:1837
+4818:1:1838
+4819:1:1842
+4820:1:1843
+4821:1:1851
+4822:1:1856
+4823:1:1863
+4824:1:1864
+4825:1:1871
+4826:1:1876
+4827:1:1883
+4828:1:1884
+4829:1:1883
+4830:1:1884
+4831:1:1891
+4832:0:4029
+4833:1:11
+4834:0:4029
+4835:1:1902
+4836:1:1903
+4837:1:1907
+4838:1:1908
+4839:1:1916
+4840:1:1917
+4841:1:1921
+4842:1:1922
+4843:1:1930
+4844:1:1935
+4845:1:1939
+4846:1:1940
+4847:1:1948
+4848:1:1949
+4849:1:1953
+4850:1:1954
+4851:1:1948
+4852:1:1949
+4853:1:1953
+4854:1:1954
+4855:1:1962
+4856:1:1967
+4857:1:1974
+4858:1:1975
+4859:1:1982
+4860:1:1987
+4861:1:1994
+4862:1:1995
+4863:1:1994
+4864:1:1995
+4865:1:2002
+4866:1:2011
+4867:0:4029
+4868:1:11
+4869:0:4029
+4870:1:2129
+4871:1:2133
+4872:1:2134
+4873:1:2138
+4874:1:2139
+4875:1:2147
+4876:1:2155
+4877:1:2156
+4878:1:2160
+4879:1:2164
+4880:1:2165
+4881:1:2160
+4882:1:2164
+4883:1:2165
+4884:1:2169
+4885:1:2176
+4886:1:2183
+4887:1:2184
+4888:1:2191
+4889:1:2196
+4890:1:2203
+4891:1:2204
+4892:1:2203
+4893:1:2204
+4894:1:2211
+4895:0:4029
+4896:1:11
+4897:0:4029
+4898:2:3507
+4899:2:3516
+4900:2:3517
+4901:2:3521
+4902:2:3522
+4903:2:3526
+4904:2:3527
+4905:2:3535
+4906:2:3540
+4907:2:3544
+4908:2:3545
+4909:2:3553
+4910:2:3554
+4911:2:3558
+4912:2:3559
+4913:2:3553
+4914:2:3554
+4915:2:3558
+4916:2:3559
+4917:2:3567
+4918:2:3574
+4919:2:3575
+4920:2:3579
+4921:2:3580
+4922:2:3587
+4923:2:3592
+4924:2:3599
+4925:2:3600
+4926:2:3599
+4927:2:3600
+4928:2:3607
+4929:2:3617
+4930:0:4029
+4931:2:2767
+4932:0:4029
+4933:2:3623
+4934:2:3632
+4935:2:3633
+4936:2:3637
+4937:2:3638
+4938:2:3642
+4939:2:3643
+4940:2:3651
+4941:2:3656
+4942:2:3660
+4943:2:3661
+4944:2:3669
+4945:2:3670
+4946:2:3674
+4947:2:3675
+4948:2:3669
+4949:2:3670
+4950:2:3674
+4951:2:3675
+4952:2:3683
+4953:2:3690
+4954:2:3691
+4955:2:3695
+4956:2:3696
+4957:2:3703
+4958:2:3708
+4959:2:3715
+4960:2:3716
+4961:2:3715
+4962:2:3716
+4963:2:3723
+4964:0:4029
+4965:2:2767
+4966:0:4029
+4967:1:2221
+4968:1:2222
+4969:1:2226
+4970:1:2227
+4971:1:2235
+4972:1:2236
+4973:1:2240
+4974:1:2241
+4975:1:2249
+4976:1:2254
+4977:1:2258
+4978:1:2259
+4979:1:2267
+4980:1:2268
+4981:1:2272
+4982:1:2273
+4983:1:2267
+4984:1:2268
+4985:1:2272
+4986:1:2273
+4987:1:2281
+4988:1:2286
+4989:1:2293
+4990:1:2294
+4991:1:2301
+4992:1:2306
+4993:1:2313
+4994:1:2314
+4995:1:2313
+4996:1:2314
+4997:1:2321