Skip to content
This repository has been archived by the owner on Aug 22, 2022. It is now read-only.

Commit

Permalink
Merge pull request #145 from open-craft/smarnach/load-balancer
Browse files Browse the repository at this point in the history
Load Balancer, Instant Switchover & SSL Support
  • Loading branch information
Sven Marnach committed Nov 7, 2016
2 parents fe07466 + 486a321 commit 2bfa62c
Show file tree
Hide file tree
Showing 43 changed files with 1,224 additions and 362 deletions.
6 changes: 4 additions & 2 deletions .env.test
Original file line number Diff line number Diff line change
Expand Up @@ -16,5 +16,7 @@ GITHUB_ACCESS_TOKEN='test-token'
WATCH_ORGANIZATION='test-org'
WATCH_FORK='watched/fork'
BASE_HANDLERS='["file"]'

BACKUP_SWIFT_ENABLED = true
PRELIMINARY_PAGE_SERVER_IP='47.11.08.15'
DEFAULT_LOAD_BALANCING_SERVER='ubuntu@haproxy-test.fake.domain'
LOAD_BALANCER_FRAGMENT_NAME_PREFIX='opencraft-'
16 changes: 16 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,22 @@ flavor:
* `OPENSTACK_AUTH_URL`: Your openstack auth url (required)
* `OPENSTACK_REGION`: The openstack region to deploy sandboxes in (required)

### Load balancer settings
* `DEFAULT_LOAD_BALANCING_SERVER`: The load-balancing server to be used in the
form `ssh_username@domain.name`. The server will be represented as an
instance of the LoadBalancingServer model in the database. It is possible to
create multiple instances of that model. This setting exists mainly to make
it easier to add a load-balancing server in testing and development
environments.
* `LOAD_BALANCER_FRAGMENT_NAME_PREFIX`: A prefix prepended to the filename of
the configuration fragments added to the load balancer. This serves mainly
the purpose of making the fragments easier to recognise, and it should be set
to a value identifying the instance manager installation.
* `PRELIMINARY_PAGE_SERVER_IP`: The IP address requests will be relayed to by
  the load balancer when no AppServer is active (e.g. during the deployment of
  the first AppServer). This can point to a static page informing the user that
  the instance is currently being deployed.

### DNS settings

* `DEFAULT_INSTANCE_BASE_DOMAIN`: Instances are created as subdomains of this domain,
Expand Down
1 change: 1 addition & 0 deletions circle.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ machine:
DEBUG: 'true'
DEFAULT_FORK: 'open-craft/edx-platform'
INSTANCE_EPHEMERAL_DATABASES: 'false'
LOAD_BALANCER_FRAGMENT_NAME_PREFIX: 'integration-'
TEST_RUNNER: 'opencraft.tests.utils.CircleCIParallelTestRunner'
WATCH_FORK: 'open-craft/edx-platform'
WATCH_ORGANIZATION: 'open-craft'
Expand Down
44 changes: 44 additions & 0 deletions instance/ansible.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@

from django.conf import settings

from instance.utils import poll_streams


# Logging #####################################################################

Expand Down Expand Up @@ -161,3 +163,45 @@ def run_playbook(requirements_path, inventory_str, vars_str, playbook_path, play
shell=True,
env=env,
)


def capture_playbook_output(
        requirements_path, inventory_str, vars_str, playbook_path, username='root', logger_=None, collect_logs=False
):
    """
    Convenience wrapper for run_playbook() that streams and optionally captures the output
    of the playbook run.

    Parameters:
    * requirements_path: path to the pip requirements file for the playbook run.
    * inventory_str: the Ansible inventory, as a string.
    * vars_str: extra Ansible variables, as a string.
    * playbook_path: full path to the playbook file; it is split into directory and
      file name for run_playbook().
    * username: the SSH user name used to connect to the hosts.
    * logger_: optional logger; stdout lines are logged at INFO level and stderr lines
      at ERROR level.
    * collect_logs: when True, all output lines are collected and returned.

    Returns the process return code, or the tuple (log_lines, returncode) when
    collect_logs is True.
    """
    with run_playbook(
        requirements_path=requirements_path,
        inventory_str=inventory_str,
        vars_str=vars_str,
        playbook_path=os.path.dirname(playbook_path),
        playbook_name=os.path.basename(playbook_path),
        username=username,
    ) as process:
        log_lines = []
        try:
            log_line_generator = poll_streams(
                process.stdout,
                process.stderr,
                line_timeout=settings.ANSIBLE_LINE_TIMEOUT,
                global_timeout=settings.ANSIBLE_GLOBAL_TIMEOUT,
            )
            for stream, line in log_line_generator:
                line = line.decode('utf-8').rstrip()
                if logger_ is not None:
                    if stream == process.stdout:
                        logger_.info(line)
                    elif stream == process.stderr:
                        logger_.error(line)
                if collect_logs:
                    log_lines.append(line)
        except TimeoutError:
            if logger_ is not None:
                logger_.error('Playbook run timed out. Terminating the Ansible process.')
            process.terminate()
        # Always wait for the process to exit before reading returncode: the stream
        # generator being exhausted only means the pipes reached EOF, not that the
        # process has exited, so process.returncode could still be None here.
        process.wait()
        if collect_logs:
            return log_lines, process.returncode
        return process.returncode
55 changes: 44 additions & 11 deletions instance/gandi.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,28 +99,61 @@ def set_zone_version(self, zone_id, zone_version_id):
"""
return self.client_zone.version.set(self.api_key, zone_id, zone_version_id)

def set_dns_record(self, domain, attempts=4, retry_delay=1, **record):
def _dns_operation(self, callback, domain, log_msg, attempts=4, retry_delay=1):
"""
Set a DNS record - Automatically create a new version, update with the change & activate
This method takes the mandatory `domain` parameter to be able to support multiple domains,
handled by the same Gandi account
Encapsulate logic that is common to high-level DNS operations: grab the global lock, get the
zone_id for a domain, create a new zone version, activate the zone version after successful
update, and retry the whole procedure multiple times if necessary.
"""
if 'ttl' not in record.keys():
record['ttl'] = 1200

with cache.lock('gandi_set_dns_record'): # Only do one DNS update at a time
for i in range(1, attempts + 1):
try:
logger.info('Setting DNS record: %s (attempt %d out of %d)', record, i, attempts)
logger.info('%s (attempt %d out of %d)', log_msg, i, attempts)
zone_id = self.get_zone_id(domain)
new_zone_version = self.create_new_zone_version(zone_id)
self.delete_dns_record(zone_id, new_zone_version, record['name'])
returned_record = self.add_dns_record(zone_id, new_zone_version, record)
result = callback(zone_id, new_zone_version)
self.set_zone_version(zone_id, new_zone_version)
break
except xmlrpc.client.Fault:
if i == attempts:
raise
time.sleep(retry_delay)
retry_delay *= 2
return returned_record
return result

def set_dns_record(self, domain, **record):
    """
    Set a DNS record. This method takes the mandatory `domain` parameter to be able to support
    multiple domains, handled by the same Gandi account.

    Returns the record returned by the Gandi API.
    """
    # Default TTL of 20 minutes when the caller does not specify one.
    if 'ttl' not in record.keys():
        record['ttl'] = 1200

    def set_dns_record_callback(zone_id, zone_version):
        """
        Callback to be passed to _dns_operation().
        """
        self.delete_dns_record(zone_id, zone_version, record['name'])
        return self.add_dns_record(zone_id, zone_version, record)

    # Return the result so callers keep receiving the newly added record, as they
    # did before this method delegated the retry logic to _dns_operation().
    return self._dns_operation(
        callback=set_dns_record_callback,
        domain=domain,
        log_msg='Setting DNS record: {}'.format(record),
    )

def remove_dns_record(self, domain, name):
    """
    Remove the given name for the domain.

    The deletion is performed inside _dns_operation(), which handles locking,
    zone versioning, activation and retries.
    """
    # The callback only needs to delete the record in the new zone version.
    self._dns_operation(
        callback=lambda zone_id, zone_version: self.delete_dns_record(zone_id, zone_version, name),
        domain=domain,
        log_msg='Deleting DNS record: {}'.format(name),
    )
90 changes: 0 additions & 90 deletions instance/logger_adapter.py

This file was deleted.

17 changes: 17 additions & 0 deletions instance/logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,3 +95,20 @@ def emit(self, record):
# TODO: Filter out log entries for which the user doesn't have view rights
# TODO: More targetted events - only emit events for what the user is looking at
publish_data('log', log_event)


class ModelLoggerAdapter(logging.LoggerAdapter):
    """
    Custom LoggerAdapter for model instances.

    The model instance must be included under the key "obj" when constructing the
    logger adapter. The adapter prefixes each message with information on the
    associated model instance, obtained by calling the instance's
    get_log_message_annotation() method.
    """
    def process(self, msg, kwargs):
        msg, kwargs = super().process(msg, kwargs)
        prefix = self.extra['obj'].get_log_message_annotation()
        if not prefix:
            # No annotation available; pass the message through unchanged.
            return msg, kwargs
        return "{} | {}".format(prefix, msg), kwargs
27 changes: 12 additions & 15 deletions instance/management/commands/activity_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,12 @@

from instance import ansible
from instance.models.openedx_instance import OpenEdXInstance
from instance.utils import poll_streams
from registration.models import BetaTestApplication


# Classes #####################################################################


class Command(BaseCommand):
"""
Activity_csv management command class
Expand Down Expand Up @@ -70,7 +70,7 @@ def handle(self, *args, **options):

self.activity_csv(out)

def activity_csv(self, out): # pylint: disable=too-many-locals,missing-docstring
def activity_csv(self, out): # pylint: disable=missing-docstring
# Produce a mapping of public IPs (of active app servers) to parent instances.
active_appservers = {
instance.active_appserver.server.public_ip: instance for instance in OpenEdXInstance.objects.all()
Expand All @@ -89,28 +89,25 @@ def activity_csv(self, out): # pylint: disable=too-many-locals,missing-docstrin
inventory = '[apps]\n{servers}'.format(servers='\n'.join(active_appservers.keys()))
playbook_path = os.path.join(settings.SITE_ROOT, 'playbooks/collect_activity/collect_activity.yml')

def log_line(line):
"""Helper to pass to capture_playbook_output()."""
self.stderr.write(self.style.SUCCESS(line)) # pylint: disable=no-member
log_line.info = log_line
log_line.error = log_line

# Launch the collect_activity playbook, which places a set of files into the `playbook_output_dir`
# on this host.
with ansible.run_playbook(
ansible.capture_playbook_output(
requirements_path=os.path.join(os.path.dirname(playbook_path), 'requirements.txt'),
inventory_str=inventory,
vars_str=(
'local_output_dir: {output_dir}\n'
'remote_output_filename: /tmp/activity_report'
).format(output_dir=playbook_output_dir),
playbook_path=os.path.dirname(playbook_path),
playbook_name=os.path.basename(playbook_path),
playbook_path=playbook_path,
username=settings.OPENSTACK_SANDBOX_SSH_USERNAME,
) as process:
log_line_generator = poll_streams(
process.stdout,
process.stderr,
)
for _, line in log_line_generator:
line = line.decode('utf-8').rstrip()
# Forward all lines from both channels to stderr. stdout is optionally used for the CSV output.
self.stderr.write(self.style.SUCCESS(line)) # pylint: disable=no-member
process.wait()
logger_=log_line,
)

csv_writer = csv.writer(out, quoting=csv.QUOTE_NONNUMERIC)
csv_writer.writerow([
Expand Down
41 changes: 41 additions & 0 deletions instance/migrations/0062_load_balancer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-03 14:41
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import instance.models.utils


class Migration(migrations.Migration):
    """
    Introduce the LoadBalancingServer model and link OpenEdXInstance to it.

    NOTE: this is an auto-generated Django migration; the field definitions below
    must not be edited by hand once the migration has been applied anywhere.
    """

    dependencies = [
        ('instance', '0061_auto_20161028_1917'),
    ]

    operations = [
        # New model representing a load-balancing (haproxy) server, identified by
        # its domain and the SSH user name used to configure it.
        migrations.CreateModel(
            name='LoadBalancingServer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('domain', models.CharField(max_length=100, unique=True)),
                ('ssh_username', models.CharField(max_length=32)),
                # Whether new backends may still be assigned to this server.
                ('accepts_new_backends', models.BooleanField(default=True)),
                # Per-server suffix appended to configuration fragment names.
                ('fragment_name_postfix', models.CharField(blank=True, max_length=8)),
            ],
            options={
                'abstract': False,
                'ordering': ('-modified', '-created'),
                'get_latest_by': 'modified',
            },
            bases=(instance.models.utils.ValidateModelMixin, models.Model),
        ),
        # Each instance optionally points to the load balancer serving it; PROTECT
        # prevents deleting a load balancer that instances still reference.
        migrations.AddField(
            model_name='openedxinstance',
            name='load_balancing_server',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='instance.LoadBalancingServer'),
        ),
    ]
Loading

0 comments on commit 2bfa62c

Please sign in to comment.