From b16b8b96c1b809cfd6a3206d1685d0dd287c3392 Mon Sep 17 00:00:00 2001 From: Karl Hobley Date: Wed, 16 Dec 2020 13:44:45 +0000 Subject: [PATCH] Rename treatment to variant --- .../0010_rename_treatment_to_variant.py | 28 +++++++++ .../0011_rename_treatment_to_variant_data.py | 30 ++++++++++ wagtail_ab_testing/models.py | 60 +++++++++---------- .../static_src/style/progress.scss | 18 +++--- .../templates/wagtail_ab_testing/results.html | 20 +++---- .../test/tests/test_abtest_model.py | 22 +++---- .../test/tests/test_add_abtest.py | 4 +- .../test/tests/test_compare_draft.py | 2 +- .../test/tests/test_progress.py | 6 +- wagtail_ab_testing/test/tests/test_report.py | 2 +- wagtail_ab_testing/test/tests/test_results.py | 2 +- wagtail_ab_testing/test/tests/test_serve.py | 16 ++--- wagtail_ab_testing/views.py | 34 +++++------ wagtail_ab_testing/wagtail_hooks.py | 6 +- 14 files changed, 154 insertions(+), 96 deletions(-) create mode 100644 wagtail_ab_testing/migrations/0010_rename_treatment_to_variant.py create mode 100644 wagtail_ab_testing/migrations/0011_rename_treatment_to_variant_data.py diff --git a/wagtail_ab_testing/migrations/0010_rename_treatment_to_variant.py b/wagtail_ab_testing/migrations/0010_rename_treatment_to_variant.py new file mode 100644 index 0000000..1bc7e2c --- /dev/null +++ b/wagtail_ab_testing/migrations/0010_rename_treatment_to_variant.py @@ -0,0 +1,28 @@ +# Generated by Django 3.1.3 on 2020-12-16 13:40 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('wagtail_ab_testing', '0009_rename_variant_to_version'), + ] + + operations = [ + migrations.RenameField( + model_name='abtest', + old_name='treatment_revision', + new_name='variant_revision', + ), + migrations.AlterField( + model_name='abtest', + name='winning_version', + field=models.CharField(choices=[('control', 'Control'), ('variant', 'Variant')], max_length=9, null=True), + ), + migrations.AlterField( + model_name='abtesthourlylog', 
+ name='version', + field=models.CharField(choices=[('control', 'Control'), ('variant', 'Variant')], max_length=9), + ), + ] diff --git a/wagtail_ab_testing/migrations/0011_rename_treatment_to_variant_data.py b/wagtail_ab_testing/migrations/0011_rename_treatment_to_variant_data.py new file mode 100644 index 0000000..b24245c --- /dev/null +++ b/wagtail_ab_testing/migrations/0011_rename_treatment_to_variant_data.py @@ -0,0 +1,30 @@ +# Generated by Django 3.1.3 on 2020-12-16 13:40 + +from django.db import migrations, models + + +def rename_treatment_to_variant_forwards(apps, schema_editor): + AbTest = apps.get_model('wagtail_ab_testing.AbTest') + AbTest.objects.filter(winning_version='treatment').update(winning_version='variant') + + AbTestHourlyLog = apps.get_model('wagtail_ab_testing.AbTestHourlyLog') + AbTestHourlyLog.objects.filter(version='treatment').update(version='variant') + + +def rename_treatment_to_variant_backwards(apps, schema_editor): + AbTest = apps.get_model('wagtail_ab_testing.AbTest') + AbTest.objects.filter(winning_version='variant').update(winning_version='treatment') + + AbTestHourlyLog = apps.get_model('wagtail_ab_testing.AbTestHourlyLog') + AbTestHourlyLog.objects.filter(version='variant').update(version='treatment') + + +class Migration(migrations.Migration): + + dependencies = [ + ('wagtail_ab_testing', '0010_rename_treatment_to_variant'), + ] + + operations = [ + migrations.RunPython(rename_treatment_to_variant_forwards, rename_treatment_to_variant_backwards) + ] diff --git a/wagtail_ab_testing/models.py b/wagtail_ab_testing/models.py index f7db3d4..5d3121c 100644 --- a/wagtail_ab_testing/models.py +++ b/wagtail_ab_testing/models.py @@ -27,7 +27,7 @@ class AbTest(models.Model): Represents an A/B test that has been set up by the user. The live page content is used as the control, the revision pointed to in - the `.treatment_revision` field contains the changes that are being tested. 
+ the `.variant_revision` field contains the changes that are being tested. """ class Status(models.TextChoices): @@ -38,7 +38,7 @@ class Status(models.TextChoices): # These two sound similar, but there's a difference: # 'Finished' means that we've reached the sample size and testing has stopped - # but the user still needs to decide whether to publish the treatment version + # but the user still needs to decide whether to publish the variant version # or revert back to the control. # Once they've decided and that action has taken place, the test status is # updated to 'Completed'. @@ -47,18 +47,18 @@ class Status(models.TextChoices): class Version(models.TextChoices): CONTROL = 'control', __('Control') - TREATMENT = 'treatment', __('Treatment') + VARIANT = 'variant', __('Variant') class CompletionAction(models.TextChoices): # See docstring of the .complete() method for descriptions DO_NOTHING = 'do-nothing', "Do nothing" REVERT = 'revert', "Revert to control" - PUBLISH = 'publisn', "Publish treatment" + PUBLISH = 'publisn', "Publish variant" page = models.ForeignKey('wagtailcore.Page', on_delete=models.CASCADE, related_name='ab_tests') name = models.CharField(max_length=255) hypothesis = models.TextField(blank=True) - treatment_revision = models.ForeignKey('wagtailcore.PageRevision', on_delete=models.CASCADE, related_name='+') + variant_revision = models.ForeignKey('wagtailcore.PageRevision', on_delete=models.CASCADE, related_name='+') goal_event = models.CharField(max_length=255) goal_page = models.ForeignKey('wagtailcore.Page', null=True, blank=True, on_delete=models.SET_NULL, related_name='+') sample_size = models.PositiveIntegerField(validators=[MinValueValidator(1)]) @@ -150,7 +150,7 @@ def finish(self): Note that this doesn't 'complete' the test: a finished test means that testing is no longer happening. The test is not complete until the user decides on the outcome of the test (keep the control or - publish the treatment). 
This decision is set using the .complete() + publish the variant). This decision is set using the .complete() method. """ self.status = self.Status.FINISHED @@ -166,14 +166,14 @@ def complete(self, action, user=None): Actions can be: - AbTest.CompletionAction.DO_NOTHING - This just completes the test but does nothing to the page. The control will - remain the published version and the treatment will be + remain the published version and the variant will be in draft. - AbTest.CompletionAction.REVERT - This completes the test and also creates a new revision to revert the content back to what it was in the control while the test was taking place. - AbTest.CompletionAction.PUBLISH - This completes the test - and also publishes the treatment revision. + and also publishes the variant revision. """ self.status = self.Status.COMPLETED self.save(update_fields=['status']) @@ -186,7 +186,7 @@ def complete(self, action, user=None): self.page.save_revision(user=user, log_action='wagtail.revert').publish(user=user) elif action == AbTest.CompletionAction.PUBLISH: - self.treatment_revision.publish(user=user) + self.variant_revision.publish(user=user) def add_participant(self, version=None): """ @@ -195,23 +195,23 @@ def add_participant(self, version=None): # Get current numbers of participants for each version stats = self.hourly_logs.aggregate( control_participants=Sum('participants', filter=Q(version=self.Version.CONTROL)), - treatment_participants=Sum('participants', filter=Q(version=self.Version.TREATMENT)), + variant_participants=Sum('participants', filter=Q(version=self.Version.VARIANT)), ) control_participants = stats['control_participants'] or 0 - treatment_participants = stats['treatment_participants'] or 0 + variant_participants = stats['variant_participants'] or 0 # Create an equal number of participants for each version if version is None: - if treatment_participants > control_participants: + if variant_participants > control_participants: version = 
self.Version.CONTROL - elif treatment_participants < control_participants: - version = self.Version.TREATMENT + elif variant_participants < control_participants: + version = self.Version.VARIANT else: version = random.choice([ self.Version.CONTROL, - self.Version.TREATMENT, + self.Version.VARIANT, ]) # Add new participant to statistics model @@ -222,7 +222,7 @@ def add_participant(self, version=None): # get a chance to turn into conversions. It's unlikely to make a # significant difference to the results. # Note: Adding 1 to account for the new participant - if control_participants + treatment_participants + 1 >= self.sample_size: + if control_participants + variant_participants + 1 >= self.sample_size: self.finish() return version @@ -240,7 +240,7 @@ def check_for_winner(self): """ Performs a Chi-Squared test to check if there is a clear winner. - Returns Version.CONTROL or Version.TREATMENT if there is one. Otherwise, it returns None. + Returns Version.CONTROL or Version.VARIANT if there is one. Otherwise, it returns None. 
For more information on what the Chi-Squared test does, see: https://www.evanmiller.org/ab-testing/chi-squared.html @@ -250,30 +250,30 @@ def check_for_winner(self): stats = self.hourly_logs.aggregate( control_participants=Sum('participants', filter=Q(version=self.Version.CONTROL)), control_conversions=Sum('conversions', filter=Q(version=self.Version.CONTROL)), - treatment_participants=Sum('participants', filter=Q(version=self.Version.TREATMENT)), - treatment_conversions=Sum('conversions', filter=Q(version=self.Version.TREATMENT)), + variant_participants=Sum('participants', filter=Q(version=self.Version.VARIANT)), + variant_conversions=Sum('conversions', filter=Q(version=self.Version.VARIANT)), ) control_participants = stats['control_participants'] or 0 control_conversions = stats['control_conversions'] or 0 - treatment_participants = stats['treatment_participants'] or 0 - treatment_conversions = stats['treatment_conversions'] or 0 + variant_participants = stats['variant_participants'] or 0 + variant_conversions = stats['variant_conversions'] or 0 - if not control_conversions and not treatment_conversions: + if not control_conversions and not variant_conversions: return - if control_conversions > control_participants or treatment_conversions > treatment_participants: + if control_conversions > control_participants or variant_conversions > variant_participants: # Something's up. I'm sure it's already clear in the UI what's going on, so let's not crash return # Create a numpy array with values to pass in to Chi-Squared test control_failures = control_participants - control_conversions - treatment_failures = treatment_participants - treatment_conversions + variant_failures = variant_participants - variant_conversions - if control_failures == 0 and treatment_failures == 0: + if control_failures == 0 and variant_failures == 0: # Prevent this error: "The internally computed table of expected frequencies has a zero element at (0, 1)." 
return - T = np.array([[control_conversions, control_failures], [treatment_conversions, treatment_failures]]) + T = np.array([[control_conversions, control_failures], [variant_conversions, variant_failures]]) # Perform Chi-Squared test p = scipy.stats.chi2_contingency(T, correction=False)[1] @@ -283,10 +283,10 @@ def check_for_winner(self): if 1 - p > required_confidence_level: # There is a clear winner! # Return the one with the highest success rate - if (control_conversions / control_participants) > (treatment_conversions / treatment_participants): + if (control_conversions / control_participants) > (variant_conversions / variant_participants): return self.Version.CONTROL else: - return self.Version.TREATMENT + return self.Version.VARIANT def get_status_description(self): """ @@ -303,8 +303,8 @@ def get_status_description(self): if self.winning_version == AbTest.Version.CONTROL: return status + " (" + _("Control won") + ")" - elif self.winning_version == AbTest.Version.TREATMENT: - return status + " (" + _("Treatment won") + ")" + elif self.winning_version == AbTest.Version.VARIANT: + return status + " (" + _("Variant won") + ")" else: return status + " (" + _("No clear winner") + ")" diff --git a/wagtail_ab_testing/static_src/style/progress.scss b/wagtail_ab_testing/static_src/style/progress.scss index 034ebb5..848a68a 100644 --- a/wagtail_ab_testing/static_src/style/progress.scss +++ b/wagtail_ab_testing/static_src/style/progress.scss @@ -1,7 +1,7 @@ @import 'vendor/c3.min.css'; $color-control: #0c0073; -$color-treatment: #ef746f; +$color-variant: #ef746f; $light-teal: #e1f0f0; $dark-teal: #007273; @@ -63,16 +63,16 @@ $charcoal-grey: #333; } } - &--treatment { + &--variant { right: 0; padding-left: 10px; - color: $color-treatment; + color: $color-variant; a { - color: $color-treatment !important; + color: $color-variant !important; &:hover { - color: darken($color-treatment, 10%) !important; + color: darken($color-variant, 10%) !important; } } } @@ -105,15 
+105,15 @@ $charcoal-grey: #333; &__version--control &__version-heading { border-bottom-color: $color-control; } - &__version--treatment &__version-heading { - border-bottom-color: $color-treatment; + &__version--variant &__version-heading { + border-bottom-color: $color-variant; } &__version--control &__version-heading--winner { background-color: $color-control; } - &__version--treatment &__version-heading--winner { - background-color: $color-treatment; + &__version--variant &__version-heading--winner { + background-color: $color-variant; } &__version-inner { diff --git a/wagtail_ab_testing/templates/wagtail_ab_testing/results.html b/wagtail_ab_testing/templates/wagtail_ab_testing/results.html index 7b13c63..bc6fea7 100644 --- a/wagtail_ab_testing/templates/wagtail_ab_testing/results.html +++ b/wagtail_ab_testing/templates/wagtail_ab_testing/results.html @@ -15,7 +15,7 @@

{{ ab_test.name }}

Add test participants Add conversions for control - Add conversions for treatment + Add conversions for variant
@@ -78,17 +78,17 @@

{% trans "Control" %} {% icon n {% endif %}

-
-
- {% if treatment_is_winner %}{% icon "crown" %} {% trans "Winner!" %}{% elif unclear_winner %}{% trans "No clear winner" %}{% endif %} +
+
+ {% if variant_is_winner %}{% icon "crown" %} {% trans "Winner!" %}{% elif unclear_winner %}{% trans "No clear winner" %}{% endif %}
@@ -116,7 +116,7 @@

{% trans "Treatment" %} Compare pages and see how the control and treatment pages differ. + Compare pages and see how the control and variant pages differ. {% endblocktrans %}

{% trans "Conversions over time" %}

diff --git a/wagtail_ab_testing/test/tests/test_abtest_model.py b/wagtail_ab_testing/test/tests/test_abtest_model.py index 46d78c8..8e0e4df 100644 --- a/wagtail_ab_testing/test/tests/test_abtest_model.py +++ b/wagtail_ab_testing/test/tests/test_abtest_model.py @@ -16,7 +16,7 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=home_page, name="Test", - treatment_revision=revision, + variant_revision=revision, goal_event="foo", sample_size=10, ) @@ -63,7 +63,7 @@ def test_log_conversion(self): self.assertEqual(log.participants, 0) self.assertEqual(log.conversions, 2) - def set_up_test(self, control_participants, control_conversions, treatment_participants, treatment_conversions): + def set_up_test(self, control_participants, control_conversions, variant_participants, variant_conversions): AbTestHourlyLog.objects.create( ab_test=self.ab_test, version=AbTest.Version.CONTROL, @@ -75,11 +75,11 @@ def set_up_test(self, control_participants, control_conversions, treatment_parti AbTestHourlyLog.objects.create( ab_test=self.ab_test, - version=AbTest.Version.TREATMENT, + version=AbTest.Version.VARIANT, date=datetime.date(2020, 11, 4), hour=22, - participants=treatment_participants, - conversions=treatment_conversions, + participants=variant_participants, + conversions=variant_conversions, ) def test_check_for_winner_no_data(self): @@ -92,27 +92,27 @@ def test_check_control_clearly_wins(self): self.assertEqual(self.ab_test.check_for_winner(), AbTest.Version.CONTROL) - def test_check_treatment_clearly_wins(self): + def test_check_variant_clearly_wins(self): self.set_up_test(100, 20, 100, 80) - self.assertEqual(self.ab_test.check_for_winner(), AbTest.Version.TREATMENT) + self.assertEqual(self.ab_test.check_for_winner(), AbTest.Version.VARIANT) def test_control_just_wins(self): self.set_up_test(100, 64, 100, 50) self.assertEqual(self.ab_test.check_for_winner(), AbTest.Version.CONTROL) - def test_treatment_just_wins(self): + def test_variant_just_wins(self): 
self.set_up_test(100, 50, 100, 64) - self.assertEqual(self.ab_test.check_for_winner(), AbTest.Version.TREATMENT) + self.assertEqual(self.ab_test.check_for_winner(), AbTest.Version.VARIANT) def test_close_leaning_control(self): self.set_up_test(100, 62, 100, 50) self.assertIsNone(self.ab_test.check_for_winner()) - def test_close_leaning_treatment(self): + def test_close_leaning_variant(self): self.set_up_test(100, 50, 100, 62) self.assertIsNone(self.ab_test.check_for_winner()) @@ -133,7 +133,7 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=self.home_page, name="Test", - treatment_revision=revision, + variant_revision=revision, goal_event="foo", sample_size=10, ) diff --git a/wagtail_ab_testing/test/tests/test_add_abtest.py b/wagtail_ab_testing/test/tests/test_add_abtest.py index 163fd10..ac34bf4 100644 --- a/wagtail_ab_testing/test/tests/test_add_abtest.py +++ b/wagtail_ab_testing/test/tests/test_add_abtest.py @@ -65,7 +65,7 @@ def _create_abtest(self, status): AbTest.objects.create( page=self.page, name="Test", - treatment_revision=self.page.get_latest_revision(), + variant_revision=self.page.get_latest_revision(), status=status, sample_size=100, ) @@ -176,7 +176,7 @@ def test_post_add_form(self): ab_test = AbTest.objects.get() self.assertEqual(ab_test.page, self.page.page_ptr) - self.assertEqual(ab_test.treatment_revision, self.latest_revision) + self.assertEqual(ab_test.variant_revision, self.latest_revision) self.assertEqual(ab_test.name, 'Test') self.assertEqual(ab_test.hypothesis, 'Does changing the title to "Donate now!" 
increase donations?') self.assertEqual(ab_test.goal_event, 'visit-page') diff --git a/wagtail_ab_testing/test/tests/test_compare_draft.py b/wagtail_ab_testing/test/tests/test_compare_draft.py index 42366a0..7950dfb 100644 --- a/wagtail_ab_testing/test/tests/test_compare_draft.py +++ b/wagtail_ab_testing/test/tests/test_compare_draft.py @@ -19,7 +19,7 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=self.page, name="Test", - treatment_revision=self.page.get_latest_revision(), + variant_revision=self.page.get_latest_revision(), status=AbTest.Status.RUNNING, sample_size=100, ) diff --git a/wagtail_ab_testing/test/tests/test_progress.py b/wagtail_ab_testing/test/tests/test_progress.py index f162622..c3111c7 100644 --- a/wagtail_ab_testing/test/tests/test_progress.py +++ b/wagtail_ab_testing/test/tests/test_progress.py @@ -33,7 +33,7 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=self.page, name="Test", - treatment_revision=revision, + variant_revision=revision, status=AbTest.Status.RUNNING, sample_size=100, ) @@ -181,12 +181,12 @@ def test_post_select_control(self): self.assertEqual(self.ab_test.page.title, "Test") self.assertFalse(self.ab_test.page.has_unpublished_changes) - def test_post_select_treatment(self): + def test_post_select_variant(self): self.ab_test.status = AbTest.Status.FINISHED self.ab_test.save() response = self.client.post(reverse('wagtailadmin_pages:edit', args=[self.page.id]), { - 'action-select-treatment': 'on', + 'action-select-variant': 'on', }) self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=[self.page.id])) diff --git a/wagtail_ab_testing/test/tests/test_report.py b/wagtail_ab_testing/test/tests/test_report.py index 519a4bf..0d899ad 100644 --- a/wagtail_ab_testing/test/tests/test_report.py +++ b/wagtail_ab_testing/test/tests/test_report.py @@ -29,7 +29,7 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=self.page, name="Test", - 
treatment_revision=self.page.get_latest_revision(), + variant_revision=self.page.get_latest_revision(), status=AbTest.Status.RUNNING, sample_size=100, ) diff --git a/wagtail_ab_testing/test/tests/test_results.py b/wagtail_ab_testing/test/tests/test_results.py index 68f2f52..013f197 100644 --- a/wagtail_ab_testing/test/tests/test_results.py +++ b/wagtail_ab_testing/test/tests/test_results.py @@ -29,7 +29,7 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=self.page, name="Test", - treatment_revision=self.page.get_latest_revision(), + variant_revision=self.page.get_latest_revision(), status=AbTest.Status.COMPLETED, sample_size=100, ) diff --git a/wagtail_ab_testing/test/tests/test_serve.py b/wagtail_ab_testing/test/tests/test_serve.py index ca1baef..9f11517 100644 --- a/wagtail_ab_testing/test/tests/test_serve.py +++ b/wagtail_ab_testing/test/tests/test_serve.py @@ -12,15 +12,15 @@ def setUp(self): self.ab_test = AbTest.objects.create( page=self.home_page, name="Test", - treatment_revision=revision, + variant_revision=revision, goal_event="foo", sample_size=10, ) def test_serves_control(self): - # Add a participant for treatment + # Add a participant for variant # This will make the new participant use control to balance the numbers - self.ab_test.add_participant(AbTest.Version.TREATMENT) + self.ab_test.add_participant(AbTest.Version.VARIANT) response = self.client.get('/') self.assertEqual(response.status_code, 200) @@ -30,9 +30,9 @@ def test_serves_control(self): self.assertEqual(self.client.session[f'wagtail-ab-testing_{self.ab_test.id}_version'], AbTest.Version.CONTROL) - def test_serves_treatment(self): + def test_serves_variant(self): # Add a participant for control - # This will make the new participant use treatment to balance the numbers + # This will make the new participant use variant to balance the numbers self.ab_test.add_participant(AbTest.Version.CONTROL) response = self.client.get('/') @@ -41,11 +41,11 @@ def test_serves_treatment(self): 
self.assertNotContains(response, "Welcome to your new Wagtail site!") self.assertContains(response, "Changed title") - self.assertEqual(self.client.session[f'wagtail-ab-testing_{self.ab_test.id}_version'], AbTest.Version.TREATMENT) + self.assertEqual(self.client.session[f'wagtail-ab-testing_{self.ab_test.id}_version'], AbTest.Version.VARIANT) def test_doesnt_track_bots(self): # Add a participant for control - # This will make it serve the treatment if it does incorrectly decide to track the user + # This will make it serve the variant if it does incorrectly decide to track the user self.ab_test.add_participant(AbTest.Version.CONTROL) response = self.client.get( @@ -61,7 +61,7 @@ def test_doesnt_track_bots(self): def test_doesnt_track_dnt_users(self): # Add a participant for control - # This will make it serve the treatment if it does incorrectly decide to track the user + # This will make it serve the variant if it does incorrectly decide to track the user self.ab_test.add_participant(AbTest.Version.CONTROL) response = self.client.get('/', HTTP_DNT='1') diff --git a/wagtail_ab_testing/views.py b/wagtail_ab_testing/views.py index bd85658..49f606e 100644 --- a/wagtail_ab_testing/views.py +++ b/wagtail_ab_testing/views.py @@ -35,10 +35,10 @@ def __init__(self, *args, **kwargs): for slug, goal in EVENT_TYPES.items() ] - def save(self, page, treatment_revision, user): + def save(self, page, variant_revision, user): ab_test = super().save(commit=False) ab_test.page = page - ab_test.treatment_revision = treatment_revision + ab_test.variant_revision = variant_revision ab_test.created_by = user ab_test.save() return ab_test @@ -234,15 +234,15 @@ def get_progress_and_results_common_context(request, page, ab_test): stats = ab_test.hourly_logs.aggregate( control_participants=Sum('participants', filter=Q(version=AbTest.Version.CONTROL)), control_conversions=Sum('conversions', filter=Q(version=AbTest.Version.CONTROL)), - treatment_participants=Sum('participants', 
filter=Q(version=AbTest.Version.TREATMENT)), - treatment_conversions=Sum('conversions', filter=Q(version=AbTest.Version.TREATMENT)), + variant_participants=Sum('participants', filter=Q(version=AbTest.Version.VARIANT)), + variant_conversions=Sum('conversions', filter=Q(version=AbTest.Version.VARIANT)), ) control_participants = stats['control_participants'] or 0 control_conversions = stats['control_conversions'] or 0 - treatment_participants = stats['treatment_participants'] or 0 - treatment_conversions = stats['treatment_conversions'] or 0 + variant_participants = stats['variant_participants'] or 0 + variant_conversions = stats['variant_conversions'] or 0 - current_sample_size = control_participants + treatment_participants + current_sample_size = control_participants + variant_participants estimated_completion_date = None if ab_test.status == AbTest.Status.RUNNING and current_sample_size: @@ -256,14 +256,14 @@ def get_progress_and_results_common_context(request, page, ab_test): # Generate time series data for the chart time_series = [] control = 0 - treatment = 0 + variant = 0 date = None for log in ab_test.hourly_logs.order_by('date', 'hour'): # Accumulate the conversions if log.version == AbTest.Version.CONTROL: control += log.conversions else: - treatment += log.conversions + variant += log.conversions while date is None or date < log.date: if date is None: @@ -277,7 +277,7 @@ def get_progress_and_results_common_context(request, page, ab_test): time_series.append({ 'date': date, 'control': control, - 'treatment': treatment, + 'variant': variant, }) return { @@ -288,11 +288,11 @@ def get_progress_and_results_common_context(request, page, ab_test): 'control_conversions': control_conversions, 'control_participants': control_participants, 'control_conversions_percent': int(control_conversions / control_participants * 100) if control_participants else 0, - 'treatment_conversions': treatment_conversions, - 'treatment_participants': treatment_participants, - 
'treatment_conversions_percent': int(treatment_conversions / treatment_participants * 100) if treatment_participants else 0, + 'variant_conversions': variant_conversions, + 'variant_participants': variant_participants, + 'variant_conversions_percent': int(variant_conversions / variant_participants * 100) if variant_participants else 0, 'control_is_winner': ab_test.winning_version == AbTest.Version.CONTROL, - 'treatment_is_winner': ab_test.winning_version == AbTest.Version.TREATMENT, + 'variant_is_winner': ab_test.winning_version == AbTest.Version.VARIANT, 'unclear_winner': ab_test.status in [AbTest.Status.FINISHED, ab_test.Status.COMPLETED] and ab_test.winning_version is None, 'estimated_completion_date': estimated_completion_date, 'chart_data': json.dumps({ @@ -300,7 +300,7 @@ def get_progress_and_results_common_context(request, page, ab_test): 'columns': [ ['x'] + [data_point['date'].isoformat() for data_point in time_series], [_("Control")] + [data_point['control'] for data_point in time_series], - [_("Treatment")] + [data_point['treatment'] for data_point in time_series], + [_("Variant")] + [data_point['variant'] for data_point in time_series], ], 'type': 'spline', }), @@ -355,12 +355,12 @@ def progress(request, page, ab_test): else: messages.error(request, _("The A/B test cannot be paused because it is not running.")) - elif 'action-select-treatment' in request.POST: + elif 'action-select-variant' in request.POST: if ab_test.status == AbTest.Status.FINISHED: # TODO Permission check? 
ab_test.complete(AbTest.CompletionAction.PUBLISH, user=request.user) - messages.success(request, _("The treatment version has been published.")) + messages.success(request, _("The variant version has been published.")) else: messages.error(request, _("The A/B test cannot be paused because it is not running.")) diff --git a/wagtail_ab_testing/wagtail_hooks.py b/wagtail_ab_testing/wagtail_hooks.py index 8a95167..e20887f 100644 --- a/wagtail_ab_testing/wagtail_hooks.py +++ b/wagtail_ab_testing/wagtail_hooks.py @@ -123,9 +123,9 @@ def before_serve_page(page, request, serve_args, serve_kwargs): if f'wagtail-ab-testing_{test.id}_version' not in request.session: request.session[f'wagtail-ab-testing_{test.id}_version'] = test.add_participant() - # If the user is visiting the treatment version, serve that from the revision - if request.session[f'wagtail-ab-testing_{test.id}_version'] == AbTest.Version.TREATMENT: - return test.treatment_revision.as_page_object().serve(request, *serve_args, **serve_kwargs) + # If the user is visiting the variant version, serve that from the revision + if request.session[f'wagtail-ab-testing_{test.id}_version'] == AbTest.Version.VARIANT: + return test.variant_revision.as_page_object().serve(request, *serve_args, **serve_kwargs) class AbTestingReportMenuItem(MenuItem):