Compare commits

...

77 commits

Author SHA1 Message Date
27efcda1d8 Adds azure diagram 2024-12-10 15:42:42 -05:00
d91cb0e245 set theme to red for ETS 2024-12-10 15:27:11 -05:00
MathieuSevignyLavallee
04ccca91d6 ajustement 2024-12-10 13:32:18 -05:00
dd0f5f9534 fix dns issue stress-test 2024-12-09 22:17:27 -05:00
a2d83f4f77 Merge branch 'dev-it3-PFEA2024' into dev-it3-it4-PFEA2024 2024-12-09 21:37:29 -05:00
b97454d9a9 Adds logs back in docker 2024-12-09 21:34:23 -05:00
fserres
75e669b8b4 Add deployment with opentofu 2024-12-09 14:58:48 -05:00
fserres
3ef37f6dc4 Ajout de la doc opentofu 2024-12-09 12:55:13 -05:00
MathieuSevignyLavallee
b60961acea ajout documentation test de charge 2024-12-08 22:24:15 -05:00
MathieuSevignyLavallee
e7eede36be Merge branch 'stress-test-socket' into dev-it3-it4-PFEA2024 2024-12-08 21:45:41 -05:00
MathieuSevignyLavallee
81186b6a35 move test_metric to class and rename file 2024-12-08 15:00:55 -05:00
MathieuSevignyLavallee
fff5830afd Add README, implementation de l'api Docker sur le quizroom, Fonctionnel
remove docker group

graph generator remake

Docker API implementation
2024-12-07 22:01:55 -05:00
63cdd03c14 Ajout de la configuration nginx + quizroom + healtchecks 2024-12-07 17:02:34 -05:00
dabdfafd35 fix quizroom docker checks 2024-12-07 16:40:53 -05:00
32a41d93aa adds branch creation script 2024-12-07 16:00:14 -05:00
MathieuSevignyLavallee
bb4ef54db9 new approach using cgroup 2024-12-07 15:41:21 -05:00
b3d65e0a1e force lf ending of script 2024-12-07 15:05:24 -05:00
567a765f94 fixed nginx 2024-12-07 14:58:15 -05:00
5c75347887 Adds basic health checks 2024-12-06 21:01:23 -05:00
8eab2d3a05 Adds image validation + download 2024-12-06 19:40:45 -05:00
MathieuSevignyLavallee
1a7be0ad79 ajout de metrique de sommaire 2024-12-06 19:31:48 -05:00
MathieuSevignyLavallee
1e67762b5e cleanup 2024-12-06 19:12:29 -05:00
d3199b9f3f adds env for network name 2024-12-06 18:35:32 -05:00
d9e5d6a91f Merge branch 'stress-test-socket' of github.com:ets-cfuhrman-pfe/EvalueTonSavoir into stress-test-socket 2024-12-06 18:19:25 -05:00
d4e13b8c36 template to add env variables 2024-12-06 18:19:23 -05:00
MathieuSevignyLavallee
18d3ded4fa Test de charge et graph
This reverts commit 6d988c347f.

typo gitignore

Create test.txt

gitignore

pas terminer a besoin de pofinage
2024-12-06 14:23:19 -05:00
MathieuSevignyLavallee
3744bf4347 typo gitignore 2024-12-06 11:26:35 -05:00
MathieuSevignyLavallee
ec0cc48ae7 Create test.txt 2024-12-06 11:25:15 -05:00
MathieuSevignyLavallee
20fe4e673a Revert "gitignore"
This reverts commit 6d988c347f.
2024-12-06 11:24:57 -05:00
MathieuSevignyLavallee
6d988c347f gitignore 2024-12-06 11:21:10 -05:00
MathieuSevignyLavallee
80610c3a6e pas terminer a besoin de pofinage 2024-12-05 20:24:56 -05:00
Jerry Kwok
58a55ed176 ansible documentation 2024-11-30 16:36:56 -05:00
MathieuSevignyLavallee
5a3f965c58 Update main.js 2024-11-28 17:56:56 -05:00
MathieuSevignyLavallee
ec15909d55 working 2024-11-28 15:09:22 -05:00
71353669ca adds env to main 2024-11-27 21:09:24 -05:00
15144244ad Adds docker tests 2024-11-27 21:07:21 -05:00
MathieuSevignyLavallee
0af9b099fd nginx conf 2024-11-27 21:00:52 -05:00
4f72dc7b9b clean diagram 2024-11-27 19:24:41 -05:00
MathieuSevignyLavallee
49fbdb1ffd refactor 2024-11-27 18:36:59 -05:00
fa95b9003f fixes plantuml url + adds back deployment diagram 2024-11-27 15:52:32 -05:00
2176edf7d0 Adds documentation to IT3 - less branches 2024-11-27 14:47:54 -05:00
6883774ed4
Merge pull request #168 from ets-cfuhrman-pfe/dev-it3-cache
Adds cache to nginx
2024-11-26 22:52:10 -05:00
MathieuSevignyLavallee
11222c70bd not finished 2024-11-26 17:04:22 -05:00
c45a674c72 Adds cache to nginx 2024-11-25 20:40:48 -05:00
MathieuSevignyLavallee
5c24ae56a9 write to file base 2024-11-15 20:09:56 -05:00
MathieuSevignyLavallee
f835c733a1 metrique de base 2024-11-15 19:57:55 -05:00
MathieuSevignyLavallee
5c21b6a15f optimize and cleanup 2024-11-15 19:35:41 -05:00
MathieuSevignyLavallee
b608793ac3 base
no library for socket.io
2024-11-15 17:46:01 -05:00
878fd302a4
Merge pull request #167 from ets-cfuhrman-pfe/ansible-it3-PFEA2024
Ansible it3 pfea2024
2024-11-14 14:22:21 -05:00
MathieuSevignyLavallee
0b2552bdff DNS name for proxying 2024-11-12 12:06:26 -05:00
706308d54f
Merge pull request #164 from ets-cfuhrman-pfe/socket-image
QuizRoom separation and duplication
2024-11-12 12:01:58 -05:00
MathieuSevignyLavallee
977d1c9700 added jwt token to room routes 2024-11-12 11:44:15 -05:00
35d6724d87 Removes needs of port forwarding + francisation 2024-11-12 02:33:19 -05:00
806935e48c Adds container cleaning - doesn't clean populated container on reboot 2024-11-12 01:22:54 -05:00
3c2bcb4ed4 Adds healthcheck to quiz + adds image to build 2024-11-11 23:00:38 -05:00
MathieuSevignyLavallee
2c7fd9c828 Delete room on end-quiz 2024-11-11 17:45:30 -05:00
MathieuSevignyLavallee
db6fa947d7 Working
Quand le professeur créer le room il y a un delai avec aucune info puis ca afficher la classe donc petit bug
2024-11-11 15:46:02 -05:00
MathieuSevignyLavallee
d37e6c540a Redirecting not working 2024-11-11 15:16:59 -05:00
MathieuSevignyLavallee
b744284472 basic functions for room Creation using docker 2024-11-11 11:32:46 -05:00
MathieuSevignyLavallee
cca9a2c99a provider create docker quizRoom 2024-11-10 23:41:03 -05:00
2df750b6f7
Merge pull request #163 from ets-cfuhrman-pfe/nginx-port-forward
Nginx port forward
2024-11-10 22:54:20 -05:00
d563459aa6 Was able to show a VERY guided demo 2024-11-10 22:52:04 -05:00
678d1c2250 trying to setup dynamic nginx 2024-11-10 20:42:02 -05:00
80115f050c setup routing 2024-11-10 16:33:45 -05:00
c26708a609 semi-stable-state - non-working
Co-authored-by: MathieuSevignyLavallee <MathieuSevignyLavallee@users.noreply.github.com>
2024-11-10 15:42:46 -05:00
MathieuSevignyLavallee
bbc0359ead add to the compose file 2024-11-07 12:39:36 -05:00
Jerry Kwok
e08d1477ec documentation for ansible deployment 2024-11-06 09:07:15 -05:00
MathieuSevignyLavallee
85bd93792c base image for quizRoom socket 2024-11-05 16:37:07 -05:00
Jerry Kwok
3fd562c144 fix deploy.yml 2024-11-05 15:58:26 -05:00
f2597f5491
Update deploy.yml
Adds dependencies validation
2024-11-05 15:12:39 -05:00
4cca066751 adds valkey default config 2024-11-03 16:36:21 -05:00
Jerry Kwok
cc420b3a9c fix deploy.yml 2024-10-31 15:33:16 -04:00
Jerry Kwok
ff7f0da964 update deploy.yml to run docker-compose 2024-10-31 15:31:12 -04:00
Jerry Kwok
900ccd847f update deploy.yml file to test deployment with WSL 2024-10-31 10:49:06 -04:00
Jerry Kwok
3db53c5cc4 docker file for ansible 2024-10-30 10:45:11 -04:00
Jerry Kwok
93e16f8d0b testing ansible files 2024-10-30 10:42:21 -04:00
32bcb67f33 Adds base for multi-room
Co-authored-by: roesnerb <roesnerb@users.noreply.github.com>
Co-authored-by: MathieuSevignyLavallee <MathieuSevignyLavallee@users.noreply.github.com>
2024-10-29 16:47:10 -04:00
103 changed files with 10031 additions and 1358 deletions


@ -103,4 +103,35 @@ jobs:
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-quizroom:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Quizroom Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}-quizroom
tags: |
type=ref,event=branch
type=semver,pattern={{version}}
- name: Build and push Quizroom Docker image
uses: docker/build-push-action@v5
with:
context: ./quizRoom
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/amd64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max

.github/workflows/create-docs.yml vendored Normal file

@ -0,0 +1,33 @@
name: Creates docs and deploy to gh-pages
on:
workflow_call:
workflow_dispatch:
push:
branches: [ main ]
jobs:
build:
name: Deploy docs
runs-on: ubuntu-latest
env:
PUMLURL: "https://www.plantuml.com/plantuml/"
steps:
- name: Checkout main
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v5
- name: Install dependencies
working-directory: ./documentation
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Build docs
working-directory: ./documentation
run: mkdocs build --verbose --clean
- name: Push docs to gh-pages
working-directory: ./documentation
run: python deploy.py

.gitignore vendored

@ -73,7 +73,7 @@ web_modules/
.yarn-integrity
# dotenv environment variable files
.env
server/.env
.env.development.local
.env.test.local
.env.production.local
@ -129,3 +129,16 @@ dist
.yarn/install-state.gz
.pnp.*
db-backup/
**/.env
.venv
deployments
/test/stressTest/output
# Opentofu state
opentofu/*/.terraform
opentofu/*/.terraform.lock*
opentofu/*/terraform.tfstate*
opentofu/*/terraform.tfvars
# Opentofu auth config
opentofu/auth_config.json

.vscode/launch.json vendored Normal file

@ -0,0 +1,39 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Debug backend",
"skipFiles": [
"<node_internals>/**"
],
"program": "${workspaceFolder}/server/app.js",
"cwd":"${workspaceFolder}/server/"
},
{
"type": "msedge",
"request": "launch",
"name": "Debug frontend",
"url": "http://localhost:5173",
"webRoot": "${workspaceFolder}/client/"
},
{
"name": "Docker: Attach to Node",
"type": "node",
"request": "attach",
"restart": true,
"port": 9229,
"address": "localhost",
"localRoot": "${workspaceFolder}",
"remoteRoot": "/app",
"protocol": "inspector",
"skipFiles": [
"<node_internals>/**"
]
}
]
}

ansible/Dockerfile Normal file

@ -0,0 +1,10 @@
FROM python:3.9-slim
# Installer Ansible
RUN pip install ansible
# Définir le répertoire de travail
WORKDIR /ansible
# Copier les fichiers nécessaires
COPY inventory.ini deploy.yml ./

ansible/README.md Normal file

@ -0,0 +1,40 @@
# Déploiement de Services avec Ansible et Docker Compose
Ce guide explique comment utiliser Ansible pour configurer et déployer des services Docker avec `docker-compose`.
## Prérequis
1. **Ansible** : Assurez-vous qu'Ansible est installé sur votre système.
- [Guide d'installation d'Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)
2. **Docker et Docker Compose** : Docker doit être installé et configuré pour fonctionner avec Ansible.
- Installez Docker : [Documentation Docker](https://docs.docker.com/get-docker/)
- Docker Compose est inclus comme plugin Docker dans les versions récentes de Docker.
3. **WSL (pour Windows)** : Si vous utilisez Windows, assurez-vous d'avoir configuré WSL et un environnement Ubuntu.
## Structure du projet
Le fichier `deploy.yml` contient les tâches Ansible nécessaires pour télécharger, configurer, et démarrer les services Docker en utilisant Docker Compose.
## Installation et de déploiement
### Lancer le déploiement avec Ansible
Pour exécuter le playbook Ansible `deploy.yml`, utilisez la commande suivante depuis le répertoire racine du projet :
`ansible-playbook -i inventory.ini deploy.yml`
### Vérification du déploiement
Une fois le playbook exécuté, Ansible télécharge Docker et Docker Compose, télécharge le fichier `docker-compose.yaml`, démarre Docker et lance les conteneurs spécifiés.
### Configuration et contenu du Playbook (deploy.yml)
Le playbook deploy.yml exécute les étapes suivantes :
1. Télécharge Docker Compose si ce dernier n'est pas encore présent.
2. Vérifie l'installation de Docker Compose pour s'assurer qu'il est opérationnel.
3. Démarre le service Docker si ce n'est pas déjà le cas.
4. Télécharge le fichier docker-compose.yaml depuis le dépôt Git spécifié.
5. Lance Docker Compose pour déployer les conteneurs définis dans docker-compose.yaml.
6. Vérifie l'état des conteneurs et affiche les conteneurs en cours d'exécution.

ansible/deploy.yml Normal file

@ -0,0 +1,38 @@
---
- name: Déployer des services avec Docker Compose
hosts: local
tasks:
- name: Télécharger Docker
ansible.builtin.package:
name: docker-compose
state: present
- name: Vérifier l'installation de Docker Compose plugin
ansible.builtin.command:
cmd: docker compose version
- name: Commencer le service docker
ansible.builtin.service:
name: docker
state: started
enabled: yes
- name: Telecharger le fichier docker-compose
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/ets-cfuhrman-pfe/EvalueTonSavoir/refs/heads/main/docker-compose.yaml
dest: ./docker-compose.yaml
- name: Lancer Docker Compose
ansible.builtin.shell:
docker-compose up -d
become: true
- name: Vérification des services Docker
ansible.builtin.command:
cmd: docker ps
register: docker_ps_output
- name: Afficher l'état des conteneurs Docker
ansible.builtin.debug:
msg: "{{ docker_ps_output.stdout }}"


@ -0,0 +1,70 @@
services:
frontend:
image: fuhrmanator/evaluetonsavoir-frontend:latest
container_name: frontend
ports:
- "5173:5173"
environment:
VITE_BACKEND_URL: "http://localhost:4400"
# don't define VITE_BACKEND_SOCKET_URL so it will default to window.location.host
# VITE_BACKEND_SOCKET_URL: ""
restart: always
backend:
image: fuhrmanator/evaluetonsavoir-backend:latest
container_name: backend
ports:
- "3000:3000"
environment:
PORT: 3000
MONGO_URI: "mongodb://mongo:27017/evaluetonsavoir"
MONGO_DATABASE: evaluetonsavoir
EMAIL_SERVICE: gmail
SENDER_EMAIL: infoevaluetonsavoir@gmail.com
EMAIL_PSW: 'vvml wmfr dkzb vjzb'
JWT_SECRET: haQdgd2jp09qb897GeBZyJetC8ECSpbFJe
FRONTEND_URL: "http://localhost:5173"
depends_on:
- mongo
restart: always
# Ce conteneur sert de routeur pour assurer le bon fonctionnement de l'application
nginx:
image: fuhrmanator/evaluetonsavoir-routeur:latest
container_name: nginx
ports:
- "80:80"
depends_on:
- backend
- frontend
restart: always
# Ce conteneur est la base de données principale pour l'application
mongo:
image: mongo
container_name: mongo
ports:
- "27017:27017"
tty: true
volumes:
- mongodb_data:/data/db
restart: always
# Ce conteneur assure que l'application est à jour en allant chercher s'il y a des mises à jours à chaque heure
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- TZ=America/Montreal
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_DEBUG=true
- WATCHTOWER_INCLUDE_RESTARTING=true
- WATCHTOWER_SCHEDULE=0 0 5 * * * # At 5 am everyday
restart: always
volumes:
mongodb_data:
external: false

ansible/inventory.ini Normal file

@ -0,0 +1,9 @@
# Spécifier les serveurs où vous souhaitez déployer votre application.
# Remplacez votre_ip_serveur par l'adresse IP de votre serveur, et votre_utilisateur_ssh par le nom d'utilisateur SSH.
# Pour les serveurs
# [app_servers]
# votre_ip_serveur ansible_user=votre_utilisateur_ssh
[local]
localhost ansible_connection=local ansible_python_interpreter=/usr/bin/python3


@ -12,6 +12,10 @@ RUN npm install
RUN npm run build
EXPOSE 5173
ENV PORT=5173
EXPOSE ${PORT}
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
CMD curl -f http://localhost:${PORT} || exit 1
CMD [ "npm", "run", "preview" ]

client/package-lock.json generated

@ -19,6 +19,7 @@
"@mui/material": "^6.1.0",
"@types/uuid": "^9.0.7",
"axios": "^1.6.7",
"dockerode": "^4.0.2",
"esbuild": "^0.23.1",
"gift-pegjs": "^1.0.2",
"jest-environment-jsdom": "^29.7.0",
@ -1897,6 +1898,12 @@
"node": ">=6.9.0"
}
},
"node_modules/@balena/dockerignore": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@balena/dockerignore/-/dockerignore-1.0.2.tgz",
"integrity": "sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q==",
"license": "Apache-2.0"
},
"node_modules/@bcoe/v8-coverage": {
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
@ -5190,6 +5197,15 @@
"dequal": "^2.0.3"
}
},
"node_modules/asn1": {
"version": "0.2.6",
"resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz",
"integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==",
"license": "MIT",
"dependencies": {
"safer-buffer": "~2.1.0"
}
},
"node_modules/async": {
"version": "3.2.6",
"resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz",
@ -5458,6 +5474,35 @@
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/bcrypt-pbkdf": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
"integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==",
"license": "BSD-3-Clause",
"dependencies": {
"tweetnacl": "^0.14.3"
}
},
"node_modules/binary-extensions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
@ -5469,6 +5514,17 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/bl": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
"license": "MIT",
"dependencies": {
"buffer": "^5.5.0",
"inherits": "^2.0.4",
"readable-stream": "^3.4.0"
}
},
"node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
@ -5542,12 +5598,45 @@
"node-int64": "^0.4.0"
}
},
"node_modules/buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
"integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT",
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.1.13"
}
},
"node_modules/buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
"dev": true
},
"node_modules/buildcheck": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.6.tgz",
"integrity": "sha512-8f9ZJCUXyT1M35Jx7MkBgmBMo3oHTTBIPLiY9xyL0pl3T5RwcPEY8cUHr5LBNfu/fk6c2T4DJZuVM/8ZZT2D2A==",
"optional": true,
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
@ -5658,6 +5747,12 @@
"node": ">= 6"
}
},
"node_modules/chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==",
"license": "ISC"
},
"node_modules/ci-info": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
@ -5786,6 +5881,20 @@
"node": ">=10"
}
},
"node_modules/cpu-features": {
"version": "0.0.10",
"resolved": "https://registry.npmjs.org/cpu-features/-/cpu-features-0.0.10.tgz",
"integrity": "sha512-9IkYqtX3YHPCzoVg1Py+o9057a3i0fp7S530UWokCSaFVTc7CwXPRiOjRjBQQ18ZCNafx78YfnG+HALxtVmOGA==",
"hasInstallScript": true,
"optional": true,
"dependencies": {
"buildcheck": "~0.0.6",
"nan": "^2.19.0"
},
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/create-jest": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz",
@ -6057,6 +6166,35 @@
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/docker-modem": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.3.tgz",
"integrity": "sha512-89zhop5YVhcPEt5FpUFGr3cDyceGhq/F9J+ZndQ4KfqNvfbJpPMfgeixFgUj5OjCYAboElqODxY5Z1EBsSa6sg==",
"license": "Apache-2.0",
"dependencies": {
"debug": "^4.1.1",
"readable-stream": "^3.5.0",
"split-ca": "^1.0.1",
"ssh2": "^1.15.0"
},
"engines": {
"node": ">= 8.0"
}
},
"node_modules/dockerode": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/dockerode/-/dockerode-4.0.2.tgz",
"integrity": "sha512-9wM1BVpVMFr2Pw3eJNXrYYt6DT9k0xMcsSCjtPvyQ+xa1iPg/Mo3T/gUcwI0B2cczqCeCYRPF8yFYDwtFXT0+w==",
"license": "Apache-2.0",
"dependencies": {
"@balena/dockerignore": "^1.0.2",
"docker-modem": "^5.0.3",
"tar-fs": "~2.0.1"
},
"engines": {
"node": ">= 8.0"
}
},
"node_modules/dom-accessibility-api": {
"version": "0.5.16",
"resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz",
@ -6123,6 +6261,15 @@
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"node_modules/end-of-stream": {
"version": "1.4.4",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
"license": "MIT",
"dependencies": {
"once": "^1.4.0"
}
},
"node_modules/engine.io-client": {
"version": "6.5.4",
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.5.4.tgz",
@ -6797,6 +6944,12 @@
"node": ">= 6"
}
},
"node_modules/fs-constants": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==",
"license": "MIT"
},
"node_modules/fs-extra": {
"version": "11.2.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz",
@ -7078,6 +7231,26 @@
"node": ">=4"
}
},
"node_modules/ieee754": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "BSD-3-Clause"
},
"node_modules/ignore": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz",
@ -7153,8 +7326,7 @@
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/is-arrayish": {
"version": "0.2.1",
@ -10208,11 +10380,24 @@
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
"integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==",
"license": "MIT"
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
},
"node_modules/nan": {
"version": "2.22.0",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.22.0.tgz",
"integrity": "sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw==",
"license": "MIT",
"optional": true
},
"node_modules/nanoid": {
"version": "5.0.7",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.0.7.tgz",
@ -10284,7 +10469,6 @@
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"dependencies": {
"wrappy": "1"
}
@ -10661,6 +10845,16 @@
"resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz",
"integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag=="
},
"node_modules/pump": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz",
"integrity": "sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==",
"license": "MIT",
"dependencies": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"node_modules/punycode": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
@ -10805,6 +10999,20 @@
"react-dom": ">=16.6.0"
}
},
"node_modules/readable-stream": {
"version": "3.6.2",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
"license": "MIT",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
@ -11047,6 +11255,26 @@
"queue-microtask": "^1.2.2"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
@ -11182,12 +11410,35 @@
"node": ">=0.10.0"
}
},
"node_modules/split-ca": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split-ca/-/split-ca-1.0.1.tgz",
"integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ==",
"license": "ISC"
},
"node_modules/sprintf-js": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
"dev": true
},
"node_modules/ssh2": {
"version": "1.16.0",
"resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.16.0.tgz",
"integrity": "sha512-r1X4KsBGedJqo7h8F5c4Ybpcr5RjyP+aWIG007uBPRjmdQWfEiVLzSK71Zji1B9sKxwaCvD8y8cwSkYrlLiRRg==",
"hasInstallScript": true,
"dependencies": {
"asn1": "^0.2.6",
"bcrypt-pbkdf": "^1.0.2"
},
"engines": {
"node": ">=10.16.0"
},
"optionalDependencies": {
"cpu-features": "~0.0.10",
"nan": "^2.20.0"
}
},
"node_modules/stack-utils": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz",
@ -11207,6 +11458,15 @@
"node": ">=8"
}
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"license": "MIT",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/string-length": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
@ -11319,6 +11579,34 @@
"resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
"integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw=="
},
"node_modules/tar-fs": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.0.1.tgz",
"integrity": "sha512-6tzWDMeroL87uF/+lin46k+Q+46rAJ0SyPGz7OW7wTgblI273hsBqk2C1j0/xNadNLKDTUL9BukSjB7cwgmlPA==",
"license": "MIT",
"dependencies": {
"chownr": "^1.1.1",
"mkdirp-classic": "^0.5.2",
"pump": "^3.0.0",
"tar-stream": "^2.0.0"
}
},
"node_modules/tar-stream": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
"integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
"license": "MIT",
"dependencies": {
"bl": "^4.0.3",
"end-of-stream": "^1.4.1",
"fs-constants": "^1.0.0",
"inherits": "^2.0.3",
"readable-stream": "^3.1.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/test-exclude": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
@ -11539,6 +11827,12 @@
}
}
},
"node_modules/tweetnacl": {
"version": "0.14.5",
"resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
"integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==",
"license": "Unlicense"
},
"node_modules/type-check": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
@ -11765,6 +12059,12 @@
"requires-port": "^1.0.0"
}
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
"license": "MIT"
},
"node_modules/uuid": {
"version": "9.0.1",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
@ -12637,8 +12937,7 @@
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/write-file-atomic": {
"version": "4.0.2",


@ -23,6 +23,7 @@
"@mui/material": "^6.1.0",
"@types/uuid": "^9.0.7",
"axios": "^1.6.7",
"dockerode": "^4.0.2",
"esbuild": "^0.23.1",
"gift-pegjs": "^1.0.2",
"jest-environment-jsdom": "^29.7.0",


@ -44,7 +44,7 @@ describe('WebSocketService', () => {
test('createRoom should emit create-room event', () => {
WebsocketService.connect(ENV_VARIABLES.VITE_BACKEND_URL);
WebsocketService.createRoom();
WebsocketService.createRoom('test');
expect(mockSocket.emit).toHaveBeenCalledWith('create-room');
});


@ -1,7 +1,7 @@
import React, { useEffect, useState } from 'react';
import { Socket } from 'socket.io-client';
import { ENV_VARIABLES } from '../../../constants';
//import { ENV_VARIABLES } from '../../../constants';
import StudentModeQuiz from '../../../components/StudentModeQuiz/StudentModeQuiz';
import TeacherModeQuiz from '../../../components/TeacherModeQuiz/TeacherModeQuiz';
@ -27,14 +27,14 @@ const JoinRoom: React.FC = () => {
const [isConnecting, setIsConnecting] = useState<boolean>(false);
useEffect(() => {
handleCreateSocket();
//handleCreateSocket();
return () => {
disconnect();
};
}, []);
const handleCreateSocket = () => {
const socket = webSocketService.connect(ENV_VARIABLES.VITE_BACKEND_URL);
const socket = webSocketService.connect(`/api/room/${roomName}/socket`);
socket.on('join-success', () => {
setIsWaitingForTeacher(true);


@ -10,7 +10,7 @@ import webSocketService, { AnswerReceptionFromBackendType } from '../../../servi
import { QuizType } from '../../../Types/QuizType';
import './manageRoom.css';
import { ENV_VARIABLES } from '../../../constants';
//import { ENV_VARIABLES } from '../../../constants';
import { StudentType, Answer } from '../../../Types/StudentType';
import { Button } from '@mui/material';
import LoadingCircle from '../../../components/LoadingCircle/LoadingCircle';
@ -79,13 +79,19 @@ const ManageRoom: React.FC = () => {
}
};
const createWebSocketRoom = () => {
const createWebSocketRoom = async () => {
setConnectingError('');
const socket = webSocketService.connect(ENV_VARIABLES.VITE_BACKEND_URL);
const room = await ApiService.createRoom();
const socket = webSocketService.connect(`/api/room/${room.id}/socket`);
socket.on('connect', () => {
webSocketService.createRoom();
webSocketService.createRoom(room.id);
});
socket.on("error", (error) => {
console.error("WebSocket server error:", error);
});
socket.on('connect_error', (error) => {
setConnectingError('Erreur lors de la connexion... Veuillez réessayer');
console.error('WebSocket connection error:', error);
@ -142,7 +148,7 @@ const ManageRoom: React.FC = () => {
console.log('Quiz questions not found (cannot update answers without them).');
return;
}
// Update the students state using the functional form of setStudents
setStudents((prevStudents) => {
// print the list of current student names
@ -150,7 +156,7 @@ const ManageRoom: React.FC = () => {
prevStudents.forEach((student) => {
console.log(student.name);
});
let foundStudent = false;
const updatedStudents = prevStudents.map((student) => {
console.log(`Comparing ${student.id} to ${idUser}`);
@ -170,7 +176,7 @@ const ManageRoom: React.FC = () => {
updatedAnswers = [...student.answers, newAnswer];
}
return { ...student, answers: updatedAnswers };
}
}
return student;
});
if (!foundStudent) {


@ -80,6 +80,78 @@ class ApiService {
return localStorage.removeItem("jwt");
}
//Socket Route
/**
* Creates a new room.
* @returns The room object if successful
* @returns An error string if unsuccessful
*/
public async createRoom(): Promise<any> {
try {
const url: string = this.constructRequestUrl(`/room`);
const headers = this.constructRequestHeaders();
const response = await fetch(url, {
method: 'POST',
headers: headers,
});
if (!response.ok) {
throw new Error(`La création de la salle a échoué. Status: ${response.status}`);
}
const room = await response.json();
return room;
} catch (error) {
console.log("Error details: ", error);
if (error instanceof Error) {
return error.message || 'Erreur serveur inconnue lors de la requête.';
}
return `Une erreur inattendue s'est produite.`;
}
}
/**
* Deletes a room by its name.
* @param roomName - The name of the room to delete.
* @returns true if successful
* @returns An error string if unsuccessful
*/
public async deleteRoom(roomName: string): Promise<any> {
try {
if (!roomName) {
throw new Error(`Le nom de la salle est requis.`);
}
const url = this.constructRequestUrl(`/room/${roomName}`);
const headers = this.constructRequestHeaders();
fetch(url, {
method: 'DELETE',
headers: headers,
});
return true;
} catch (error) {
console.log("Error details: ", error);
if (error instanceof Error) {
return error.message || 'Erreur serveur inconnue lors de la requête.';
}
return `Une erreur inattendue s'est produite.`;
}
}
// User Routes
/**
@ -302,6 +374,7 @@ class ApiService {
}
}
/**
* @returns folder array if successful
* @returns A error string if unsuccessful,


@ -1,5 +1,6 @@
// WebSocketService.tsx
import { io, Socket } from 'socket.io-client';
import apiService from './ApiService';
// Must (manually) sync these types to server/socket/socket.js
@ -21,10 +22,14 @@ class WebSocketService {
private socket: Socket | null = null;
connect(backendUrl: string): Socket {
// console.log(backendUrl);
this.socket = io(`${backendUrl}`, {
this.socket = io( '/',{
path: backendUrl,
transports: ['websocket'],
reconnectionAttempts: 1
autoConnect: true,
reconnection: true,
reconnectionAttempts: 10,
reconnectionDelay: 10000,
timeout: 20000,
});
return this.socket;
}
@ -37,9 +42,9 @@ class WebSocketService {
}
}
createRoom() {
createRoom(roomName: string) {
if (this.socket) {
this.socket.emit('create-room');
this.socket.emit('create-room', roomName || undefined);
}
}
@ -58,6 +63,8 @@ class WebSocketService {
endQuiz(roomName: string) {
if (this.socket) {
this.socket.emit('end-quiz', { roomName });
//Delete room in mongoDb, roomContainer will be deleted in cleanup
apiService.deleteRoom(roomName);
}
}
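
Taken together, the ApiService and WebSocketService changes above give the client the following flow when a teacher opens a room. The sketch below condenses it for reference; import paths and the helper name are illustrative, and error handling is simplified.

```typescript
import ApiService from './ApiService';            // paths illustrative
import webSocketService from './WebSocketService';

// Illustrative condensation of createWebSocketRoom() shown in the ManageRoom diff above.
async function openQuizRoom(): Promise<string> {
    // 1. Ask the backend to create the room (and its quizroom container).
    const room = await ApiService.createRoom();
    if (typeof room === 'string') {
        // createRoom returns an error message string on failure (see ApiService above).
        throw new Error(room);
    }

    // 2. Open the socket through the per-room path proxied by nginx.
    const socket = webSocketService.connect(`/api/room/${room.id}/socket`);
    socket.on('connect', () => webSocketService.createRoom(room.id));
    socket.on('connect_error', (err) => console.error('WebSocket connection error:', err));

    // 3. Later, endQuiz(room.id) also deletes the room via the REST route (see endQuiz above).
    return room.id;
}
```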

create-branch-image.bat Normal file

@ -0,0 +1,74 @@
@echo off
setlocal EnableDelayedExpansion
:: Check if gh is installed
where gh >nul 2>&1
if %errorlevel% neq 0 (
echo GitHub CLI not found. Installing...
winget install --id GitHub.cli
if %errorlevel% neq 0 (
echo Failed to install GitHub CLI. Exiting...
exit /b 1
)
echo GitHub CLI installed successfully.
)
:: Check if user is authenticated
gh auth status >nul 2>&1
if %errorlevel% neq 0 (
echo GitHub CLI not authenticated. Please authenticate...
gh auth login
if %errorlevel% neq 0 (
echo Failed to authenticate. Exiting...
exit /b 1
)
echo Authentication successful.
)
:: Get the current branch name
for /f "tokens=*" %%i in ('git rev-parse --abbrev-ref HEAD') do set BRANCH_NAME=%%i
:: Run the GitHub workflow with the current branch name
echo Running GitHub workflow with branch %BRANCH_NAME%...
gh workflow run 119194149 --ref %BRANCH_NAME%
:: Wait and validate workflow launch
set /a attempts=0
set /a max_attempts=12
echo Waiting for workflow to start...
:wait_for_workflow
timeout /t 15 >nul
set /a attempts+=1
:: Get recent workflow run matching our criteria with in_progress status
for /f "tokens=*" %%i in ('gh run list --branch %BRANCH_NAME% --status in_progress --limit 1 --json databaseId --jq ".[0].databaseId"') do set WORKFLOW_RUN_ID=%%i
if "%WORKFLOW_RUN_ID%"=="" (
if !attempts! lss !max_attempts! (
echo Attempt !attempts! of !max_attempts!: No running workflow found yet...
goto wait_for_workflow
) else (
echo Timeout waiting for workflow to start running.
exit /b 1
)
)
echo Found running workflow ID: %WORKFLOW_RUN_ID%
:monitor_progress
cls
echo Workflow Progress:
echo ----------------
gh run view %WORKFLOW_RUN_ID% --json jobs --jq ".jobs[] | \"Job: \" + .name + \" - Status: \" + .status + if .conclusion != null then \" (\" + .conclusion + \")\" else \"\" end"
echo.
:: Check if workflow is still running
for /f "tokens=*" %%i in ('gh run view %WORKFLOW_RUN_ID% --json status --jq ".status"') do set CURRENT_STATUS=%%i
if "%CURRENT_STATUS%" == "completed" (
echo Workflow completed.
exit /b 0
)
timeout /t 5 >nul
goto monitor_progress

docker-compose.local.yaml Normal file

@ -0,0 +1,137 @@
version: '3'
services:
frontend:
container_name: frontend
build:
context: ./client
dockerfile: Dockerfile
ports:
- "5173:5173"
networks:
- quiz_network
restart: always
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:$${PORT} || exit 1"]
interval: 5s
timeout: 10s
start_period: 5s
retries: 6
backend:
build:
context: ./server
dockerfile: Dockerfile
container_name: backend
networks:
- quiz_network
ports:
- "3000:3000"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
PORT: 3000
MONGO_URI: "mongodb://mongo:27017/evaluetonsavoir"
MONGO_DATABASE: evaluetonsavoir
EMAIL_SERVICE: gmail
SENDER_EMAIL: infoevaluetonsavoir@gmail.com
EMAIL_PSW: 'vvml wmfr dkzb vjzb'
JWT_SECRET: haQdgd2jp09qb897GeBZyJetC8ECSpbFJe
depends_on:
mongo:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:$${PORT}/health || exit 1"]
interval: 5s
timeout: 10s
start_period: 5s
retries: 6
quizroom: # Forces image to update
build:
context: ./quizRoom
dockerfile: Dockerfile
container_name: quizroom
ports:
- "4500:4500"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
networks:
- quiz_network
restart: always
healthcheck:
test: ["CMD", "/usr/src/app/healthcheck.sh"]
interval: 5s
timeout: 10s
start_period: 5s
retries: 6
nginx:
build:
context: ./nginx
dockerfile: Dockerfile
container_name: nginx
ports:
- "80:80"
depends_on:
frontend:
condition: service_healthy
backend:
condition: service_healthy
networks:
- quiz_network
restart: always
#environment:
# - PORT=8000
# - FRONTEND_HOST=frontend
# - FRONTEND_PORT=5173
# - BACKEND_HOST=backend
# - BACKEND_PORT=3000
healthcheck:
test: ["CMD-SHELL", "wget --spider http://0.0.0.0:$${PORT}/health || exit 1"]
interval: 5s
timeout: 10s
start_period: 5s
retries: 6
mongo:
image: mongo
container_name: mongo
ports:
- "27017:27017"
tty: true
volumes:
- mongodb_data:/data/db
networks:
- quiz_network
restart: always
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
interval: 10s
timeout: 5s
retries: 3
start_period: 20s
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- TZ=America/Montreal
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_DEBUG=true
- WATCHTOWER_INCLUDE_RESTARTING=true
- WATCHTOWER_SCHEDULE=0 0 5 * * * # At 5 am everyday
networks:
- quiz_network
restart: always
networks:
quiz_network:
driver: bridge
volumes:
mongodb_data:
external: false


@ -27,6 +27,17 @@ services:
- mongo
restart: always
quizroom:
build:
context: ./quizRoom
dockerfile: Dockerfile
container_name: quizroom
ports:
- "4500:4500"
depends_on:
- backend
restart: always
# Ce conteneur sert de routeur pour assurer le bon fonctionnement de l'application
nginx:
image: fuhrmanator/evaluetonsavoir-routeur:latest

documentation/.gitignore vendored Normal file

@ -0,0 +1 @@
site

documentation/deploy.py Normal file

@ -0,0 +1,2 @@
from ghp_import import ghp_import
ghp_import('site', push=True, force=True)


@ -0,0 +1,12 @@
## À Propos
Ce projet utilise Node.js Express pour créer un backend simple pour l'application.
## Routes API
Vous pouvez consulter toutes les routes utilisables du backend ici
* User : https://documenter.getpostman.com/view/32663805/2sA2rCU28v#e942a4f4-321c-465b-bf88-e6c1f1d6f6c8
* Quiz : https://documenter.getpostman.com/view/32663805/2sA2rCU28v#732d980b-02fd-4807-b5bc-72725098b9b0
* Folders : https://documenter.getpostman.com/view/32663805/2sA2rCU28v#49ecd432-ccfc-4c8a-8390-b3962f0d5fd7
* Images : https://documenter.getpostman.com/view/32663805/2sA2rCU28v#58382180-d6f0-492d-80c3-e09de1c368b8


@ -0,0 +1,384 @@
# Authentification
## Introduction
Le but du module d'authentification est de pouvoir facilement faire des blocs de code permettant une authentification
personalisée. Il est possible de le faire grâce à cette architecture. Pour la première version de cette fonctionalité,
l'introduction de OIDC et de OAuth sont priorisé ainsi que la migration du module d'authentification simple.
## Déconstruction simple de la structure
La structure est la suivante :
Le AuthManager s'occupe de centraliser les requêtes d'authentification. Ce dernier initialise les autres modules et est
la source de vérité dans les aspects liés à l'authentification. Les modules sont automatiquement chargés par
l'utilisation de variables d'environment.
Le module s'occupe de créer les routes nécessaires pour son fonctionnement et de créer les utilisateurs. Ces modules
vont appeller le AuthManager afin de confirmer leurs actions avec le login/register de celui-ci.
Dans le cas de modules plus complexe, tels que le module Passport, la chaine peut être prolongée afin de maintenir
les actions centralisée . Chaque connecteur de PassportJs est initialisé par le module de PassportJs.
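
Read as TypeScript, the contract described above could look roughly like the sketch below; the method names follow the class diagram further down, while the types and loading details are assumptions for illustration only.

```typescript
// Illustrative only: a concrete reading of the structure described above.
interface UserInfos { email: string; role: 'teacher' | 'student' }   // assumed shape

interface IAuthModule {
    registerAuth(): void;   // the module registers its own routes with the API
    authenticate(): void;
    register(): void;
    showAuth(): unknown;    // data the frontend uses to draw the login button/form
}

class AuthManager {
    constructor(private readonly auths: IAuthModule[]) {}

    // Modules are loaded from configuration, then each one registers its routes.
    registerAuths(): void {
        for (const auth of this.auths) auth.registerAuth();
    }

    // Central source of truth: modules call back into logIn()/register() to confirm their actions.
    logIn(user: UserInfos): void { /* issue session / refresh token */ }
    register(user: UserInfos): void { /* create local profile */ }
}
```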
## Besoins exprimés
Modularité et généricité :
- Le système d'authentification doit être adaptable à diverses configurations, notamment pour répondre aux exigences
spécifiques des différentes universités ou institutions.
Utilisation de différentes méthodes d'authentification :
- L'application doit permettre de gérer plusieurs fournisseurs d'authentification (SSO, LDAP, OAuth, etc.) de manière
centralisée et flexible.
Facilité de configuration :
- Le système doit permettre une configuration simple et flexible, adaptée à différents environnements (développement,
production, etc.).
Gestion des permissions :
- Il doit être possible de définir et de mapper facilement les permissions et les rôles des utilisateurs pour sécuriser
l'accès aux différentes fonctionnalités de l'application.
Maintien de la connexion :
- Le système doit garantir la persistance de la connexion pendant toute la durée de l'utilisation de l'application
(exemple : quiz), avec la possibilité de se reconnecter sans perte de données en cas de déconnexion temporaire.
## Recits utilisateurs pris en comptes
- En tant qu'utilisateur de projet FOSS, je veux que le module d'authentification soit modulaire et générique afin de
l'adapter à mes besoins.
- En tant qu'administrateur, je veux que les droits des utilisateurs soient inférés par l'authentificateur de l'établissement.
- En tant qu'administrateur, je veux que la configuration des authentificateurs soit simple
- En tant qu'administrateur, je veux configurer les connexions à partir de variables d'environnement ou fichier de config.
- En tant qu'utilisateur, je veux que ma connexion soit stable.
- En tant qu'utilisateur, je veux pouvoir me reconnecter à une salle s'il survient un problème de connexion.
## Diagrammes
### Structure
```plantuml
@startuml
package Backend {
class AuthManager{
+IAuthModule[] auths
#userInfos
-load()
-registerAuths()
+showAuths()
+authStatus()
+logIn(UserInfos)
+register(UserInfos)
+logOut()
}
interface IAuthModule{
+registerAuth()
+authenticate()
+register()
+showAuth()
}
class SimpleFormAuthModule{
}
class PassportAuthModule{
IPassportProviderDefinition[] providers
}
Interface IPassportProviderDefinition{
+name
+type
}
class OAuthPassportProvider{
+clientId
+clientSecret
+configUrl
+authorizeUrl
+tokenUrl
+userinfoUrl
+logoutUrl
+JWKSUrl
}
IAuthModule <|-- SimpleFormAuthModule
IAuthModule <|-- PassportAuthModule
IPassportProviderDefinition <|-- OAuthPassportProvider
AuthManager -> IAuthModule
PassportAuthModule -> IPassportProviderDefinition
}
package Frontend{
class AuthDrawer{
+IAuthVisual[] getAuthsVisual()
+drawAuths()
}
Interface IAuthVisual{
+draw()
}
class FormVisual{
+FormInput[] formInputs
}
interface FormInput{
+name
+label
+type
+value
}
AuthDrawer -> IAuthVisual
IAuthVisual <|-- FormVisual
FormVisual -> FormInput
}
@enduml
```
### Explication des communications : Passport Js
```plantuml
@startuml
box "Frontend"
participant User
Participant App
end box
box "Backend"
participant PassportAuthModule
participant Db
participant AuthManager
end box
box "Auth Server"
participant AuthServer
end box
User -> App : Get auth page
App -> User : auth page
User -> App : click OAuth button
App -> User : redirect to OAuth
User -> AuthServer: Login
AuthServer -> User: Redirect to Auth endpoint with token
User -> PassportAuthModule: Authenticate with token
PassportAuthModule -> AuthServer: get user info
AuthServer -> PassportAuthModule: userInfo
alt login
PassportAuthModule -> Db : fetch local userInfo
Db->PassportAuthModule: userInfo
PassportAuthModule -> PassportAuthModule: Merge userInfo definition
PassportAuthModule -> Db : update user profile
Db->PassportAuthModule: userInfo
end
alt register
PassportAuthModule -> Db : fetch local userInfo
Db->PassportAuthModule: null
PassportAuthModule -> Db : create user profile
Db->PassportAuthModule: userInfo
end
PassportAuthModule -> AuthManager : login(userInfos)
AuthManager -> User: Give refresh token + Redirect to page
User -> App: get /
App -> User: Show Authenticated /
@enduml
```
### Explication des communications : SimpleAuth
```plantuml
@startuml
box "Frontend"
participant User
Participant App
end box
box "Backend"
participant SimpleAuthModule
participant Db
participant AuthManager
end box
User -> App : Get auth page
App -> User : auth page
alt Login
User -> App : Send Login/Pass
App -> SimpleAuthModule: Send login/pass
SimpleAuthModule -> Db: get user info
Db->SimpleAuthModule: user info
SimpleAuthModule -> SimpleAuthModule: Validate Hash
end
alt register
User -> App : Send Username + Password + Email
App -> SimpleAuthModule: Send Username + Password + Email
SimpleAuthModule -> Db: get user info
Db -> SimpleAuthModule : null
SimpleAuthModule -> Db: put user info
end
SimpleAuthModule -> AuthManager: userInfo
AuthManager -> User: Give refresh token + Redirect to page
User -> App: get /
App -> User: Show Authenticated /
@enduml
```
### Comment les boutons sont affichés
```plantuml
@startuml
box "FrontEnd"
participant User
Participant FrontEnd
Participant AuthDrawer
end box
box "BackEnd"
participant API
participant AuthManager
participant Db
participant IAuthModule
end box
API -> API : load global configurations
create AuthManager
API -> AuthManager : instanciate with auth configurations
create IAuthModule
AuthManager -> IAuthModule : instanciate array
loop For each auth in auths
AuthManager -> IAuthModule : register
IAuthModule -> API : register routes
API -> IAuthModule : route registration confirmation
IAuthModule -> AuthManager : module registration confirmation
end
User -> FrontEnd : get login page
alt already logged in
FrontEnd -> User: redirected to authenticated page
end
FrontEnd -> AuthDrawer : get auth visual
AuthDrawer -> API : get auth form data
API -> AuthManager : get auth form data
loop For each auth in auths
AuthManager -> IAuthModule : get form data
IAuthModule -> AuthManager : form data
end
AuthManager -> API : auth fom data
API -> AuthDrawer : auth form data
AuthDrawer -> AuthDrawer : make auth html
AuthDrawer -> FrontEnd : auth HTML
FrontEnd -> User : show auth page
@enduml
```
### Comment les sessions sont conservées
```plantuml
@startuml
box "Frontend"
participant User
Participant App
end box
box "Backend"
participant AuthManager
participant IAuthModules
end box
App -> AuthManager : send refresh token
AuthManager -> IAuthModules: ForEach check if logged
IAuthModules -> AuthManager: is authenticated ?
alt one logged in
AuthManager -> App : send new token
end
alt all logged out
AuthManager -> App : send error
App -> App : destroy token
App -> User : redirect to login page
end
@enduml
```
## Configuration des variables d'environnement
Example de configuration du fichier : `server/auth_config.json` :
```json
{
"auth": {
"passportjs": // Module
[
{
"gmatte": { // Nom du sous-module Passport
"type": "oauth", // type
"OAUTH_AUTHORIZATION_URL": "https://auth.gmatte.xyz/application/o/authorize/",
"OAUTH_TOKEN_URL": "https://auth.gmatte.xyz/application/o/token/",
"OAUTH_USERINFO_URL": "https://auth.gmatte.xyz/application/o/userinfo/",
"OAUTH_CLIENT_ID": "--redacted--",
"OAUTH_CLIENT_SECRET": "--Redacted--",
"OAUTH_ADD_SCOPE": "groups", // scopes supplémentaire nécessaire pour le pivot
"OAUTH_ROLE_TEACHER_VALUE": "groups_evaluetonsavoir-prof", // valeur de pivot afin de définir un enseignant
"OAUTH_ROLE_STUDENT_VALUE": "groups_evaluetonsavoir" // valeur de pivot afin de définir un étudiant
}
},
{
"etsmtl":{
"type":"oidc",
"OIDC_CONFIG_URL":"https://login.microsoftonline.com/70aae3b7-9f3b-484d-8f95-49e8fbb783c0/v2.0/.well-known/openid-configuration",
"OIDC_CLIENT_ID": "--redacted--",
"OIDC_CLIENT_SECRET": "--redacted--",
"OIDC_ADD_SCOPE": "",
"OIDC_ROLE_TEACHER_VALUE": "groups_evaluetonsavoir-prof",
"OIDC_ROLE_STUDENT_VALUE": "groups_evaluetonsavoir"
}
}
],
"simpleauth":{}
}
}
```


@ -0,0 +1,11 @@
# Type de base de données
La base de données est une MongoDB.
# Collections disponibles
* Files : Ceci est la collection qui contient les différents quiz et leurs questions.
* Folders : Ceci est la collection qui contient les dossiers qui servent à la gestion des différents quiz
* Images : C'est dans cette collection que sont stockées les images utilisées dans les quiz
* Users : Cette collection est utilisée pour la gestion des utilisateurs
# Information sur la création
Lors du démarrage du projet, la base de données est créée automatiquement.


@ -0,0 +1,43 @@
# KaTeX
KaTeX est le module qui s'occupe de formater les formules mathématiques selon la configuration donnée.
Les formules entourées de $$ s'afficheront centrées sur leur propre ligne
`.replace(/\$\$(.*?)\$\$/g, (_, inner) => katex.renderToString(inner, { displayMode: true }))`
alors que les formules entourées de $ s'afficheront sur la même ligne
`.replace(/\$(.*?)\$/g, (_, inner) => katex.renderToString(inner, { displayMode: false }))`
La configuration du formatage peut être trouvée dans le fichier TextType.ts situé dans le dossier
EvalueTonSavoir/client/src/components/GiftTemplate/templates
C'est aussi dans ce fichier que le format markdown est pris en charge.
## Éditeur de quiz
Pour l'affichage dans l'éditeur de quiz, on peut retrouver la classe TextType être appliquée sur différents éléments
du dossier templates, par exemple la classe Numerical.ts.
On peut voir ici que le TextType est appliqué sur le contenu de la question:
```typescript
<p style="${ParagraphStyle(state.theme)}">${TextType({text: stem })}</p>
```
Selon ce qui avait été écrit dans la question, la classe s'occupera de formatter les bonnes sections.
## Affichage de questions
Le module React-latex était utilisé pour le formatage des questions durant un quiz, mais cela a apporté un problème
de disparité d'affichage entre la création et l'affichage des questions avec des formules mathématiques.
Les classes affichant les questions durant un quiz peuvent utiliser ce format, mais avec une manipulation de plus.
Les variables contenant la question doivent d'abord avoir un type TextFormat pour pouvoir faire appel à la classe qui
s'occupe du format sous le module KaTeX. Puis, étant sur un environnement React, il faut utiliser la propriété
dangerouslySetInnerHTML pour afficher la question correctement.
`<div dangerouslySetInnerHTML={{ __html: TextType({text: questionContent}) }} />
`
Ce type de manipulation peut être utilisé dans d'autres environnements React si on veut éviter d'utiliser React-latex.
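
A condensed sketch of the idea described above, combining the two regex passes with `katex.renderToString` inside a small React component; the component and prop names are illustrative, not the project's actual TextType implementation.

```typescript
import React from 'react';
import katex from 'katex';

// Hypothetical component: renders $$...$$ as display math and $...$ as inline math.
const MathText: React.FC<{ text: string }> = ({ text }) => {
    const html = text
        .replace(/\$\$(.*?)\$\$/g, (_, inner) => katex.renderToString(inner, { displayMode: true }))
        .replace(/\$(.*?)\$/g, (_, inner) => katex.renderToString(inner, { displayMode: false }));
    // dangerouslySetInnerHTML is required because KaTeX returns an HTML string.
    return <div dangerouslySetInnerHTML={{ __html: html }} />;
};

export default MathText;
```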


@ -0,0 +1,54 @@
# Example de Quiz
```gift
//-----------------------------------------//
// Examples from gift/format.php.
//-----------------------------------------//
Who's buried in Grant's tomb?{~Grant ~Jefferson =no one}
Grant is {~buried =entombed ~living} in Grant's tomb.
Grant is buried in Grant's tomb.{FALSE}
Who's buried in Grant's tomb?{=no one =nobody}
When was Ulysses S. Grant born?{#1822:5}
Match the following countries with their corresponding capitals. {
=Canada -> Ottawa
=Italy -> Rome
=Japan -> Tokyo
=India -> New Delhi
####It's good to know the capitals
}
//-----------------------------------------//
// More complicated examples.
//-----------------------------------------//
::Grant's Tomb::Grant is {
~buried#No one is buried there.
=entombed#Right answer!
~living#We hope not!
} in Grant's tomb.
Difficult multiple choice question.{
~wrong answer #comment on wrong answer
~%50%half credit answer #comment on answer
=full credit answer #well done!}
::Jesus' hometown (Short answer ex.):: Jesus Christ was from {
=Nazareth#Yes! That's right!
=%75%Nazereth#Right, but misspelled.
=%25%Bethlehem#He was born here, but not raised here.
}.
//this comment will be ignored by the filter
::Numerical example::
When was Ulysses S. Grant born? {#
=1822:0 #Correct! 100% credit
=%50%1822:2 #He was born in 1822.
You get 50% credit for being close.
}
```


@ -0,0 +1,146 @@
{
"openapi": "3.0.2",
"info": {
"title": "Room API"
},
"servers":[
{
"url": "http://localhost",
"description": "Via Docker"
},
{
"url": "http://localhost:3000",
"description": "Via npm"
}
],
"security": [
{
"bearerAuth": []
}
],
"paths": {
"/api/room": {
"get": {
"summary": "Get all rooms",
"description": "Returns a list of rooms",
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Room"
}
}
}
}
}
}
},
"post": {
"summary": "Create a new room",
"description": "Creates a new room, returns the created room",
"responses": {
"200": {
"description": "Created",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Room"
}
}
}
}
}
}
},
"/api/room/{roomId}": {
"get": {
"summary": "Get a room by id",
"description": "Returns a room by id",
"parameters": [
{
"name": "roomId",
"in": "path",
"required": true,
"schema": {
"type": "integer"
}
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Room"
}
}
}
}
}
},
"delete": {
"summary": "Delete a room by id",
"description": "Deletes a room by id",
"parameters": [
{
"name": "roomId",
"in": "path",
"required": true,
"schema": {
"type": "integer"
}
}
],
"responses": {
"200": {
"description": "OK"
}
}
}
}
},
"components": {
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT"
}
},
"schemas": {
"Room": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"default": "autoincrement"
},
"name": {
"type": "string"
},
"host": {
"type": "string"
},
"nbStudents": {
"type": "integer",
"default": 0
},
"mustBeCleaned": {
"type": "boolean",
"default": false
}
},
"required": [
"id",
"name",
"host"
]
}
}
}
}

View file

@ -0,0 +1,193 @@
# Quiz Rooms
## Introduction
Quiz rooms were extracted into their own container in order to limit the damage caused either by overuse of a room or by an attack on the software.
This decoupling allows a quiz to:
- Survive even if the backend is down
- Die without taking the whole application with it
- Be created/deleted automatically depending on demand
Making this work requires a little gymnastics. The API exposes a route for managing rooms.
When a user requests a room's socket ("/api/rooms/{id}/socket"), the request first hits the Nginx proxy.
Nginx then contacts the backend to obtain the address of the machine that should receive the request, and redirects the socket to that address (a minimal sketch of this lookup is shown below).
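The actual lookup is implemented inside Nginx with njs (see `nginx/njs/main.js`, added further down in this changeset); the following plain Node.js sketch only illustrates the idea, and the backend URL is an assumption taken from the default docker-compose values:

```js
// Illustrative only: resolve a room id to the host that serves its socket.
// The production version does this inside Nginx (nginx/njs/main.js).
async function resolveRoomHost(roomId) {
  const res = await fetch(`http://backend:3000/api/room/${roomId}`);
  if (!res.ok) throw new Error(`Room ${roomId} not found (status ${res.status})`);
  const room = await res.json();
  // `host` comes from the Room schema in the Room API OpenAPI document above.
  return room.host.startsWith('http') ? room.host : `http://${room.host}`;
}
```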
## A quick breakdown of the structure
An additional module was added to the structure: Rooms.
The `room` object is the minimal definition of a room. This definition is enriched with the information collected from the "provider".
The `provider` is the system that manages the rooms. In the current implementation, it is Docker.
When the rooms API is instantiated, it is bound to a "provider", which defines how the rooms will be created.
The rooms API lets you add, delete and query rooms.
The API also starts two jobs (sketched below):
- A room health check, which runs every 10 seconds and updates the rooms.
- A room cleanup, which runs every 30 seconds and automatically deletes rooms that are marked for deletion.
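A minimal sketch of those two jobs, using the method names from the class diagram below (`getAll`, `update`, `delete`, `getRoomStatus`, `deleteRoom`) and the `mustBeCleaned` flag from the Room schema; the intervals match the description above, everything else is illustrative rather than the project's actual code:

```js
// Illustrative sketch of the two background jobs described above.
function startRoomJobs(provider, repository) {
  // Health check: every 10 s, refresh the status of every known room.
  setInterval(async () => {
    for (const room of await repository.getAll()) {
      const status = await provider.getRoomStatus(room.id);
      // Simplified: mark unhealthy rooms so the cleanup job can remove them.
      await repository.update({ ...room, mustBeCleaned: status !== 'healthy' }, room.id);
    }
  }, 10_000);

  // Cleanup: every 30 s, delete the rooms that were marked for deletion.
  setInterval(async () => {
    for (const room of await repository.getAll()) {
      if (room.mustBeCleaned) {
        await provider.deleteRoom(room.id);
        await repository.delete(room.id);
      }
    }
  }, 30_000);
}
```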
## Expressed needs
Reliability:
- We want to make sure a large number of students can be present without disconnection problems
- We want response times to be low
- We want the system to be able to run independently
## User stories taken into account
- As a teacher, I want all of my students to be able to connect to the classroom quickly
- As a teacher, I want the quiz room to survive failures in the other modules of the application
- As an administrator, I want rooms to be independent and not impact the performance of other rooms
- As an administrator, I want rooms to be hostable separately from the project
## Diagrams
### Structure
```plantuml
@startuml
class Room{
+id
+name
+host
+nbStudents
+mustBeCleaned
}
class RoomRepository {
+get(id)
+create(room)
+delete(id)
+update(room,id)
+getAll()
}
class RoomController {
+setupRoom(options)
+deleteRoom(roomId)
+listRooms()
+getRoomStatus(roomId)
+updateRoom(room,roomId)
}
class RoomRouter{
+ / : GET
+ /:id : GET
+ / : POST
+ /:id : PUT
+ /:id : DELETE
}
class BaseRoomProvider {
+createRoom(roomid,options)
+deleteRoom(roomId)
+getRoomInfo(roomId)
+getRoomStatus(roomId)
+listRooms()
-cleanup()
-syncInstantiatedRooms()
#updateRoomInfos()
}
class DockerRoomProvider
circle Dockerode
Room - RoomRepository
BaseRoomProvider o-- RoomRepository
DockerRoomProvider --|> BaseRoomProvider
DockerRoomProvider -left- Dockerode
Dockerode o-- QuizRoom
RoomController o-- BaseRoomProvider
RoomRouter o-- RoomController
class QuizRoom{
+/health: GET
+create-room()
+join-room()
+next-question()
+launch-student-mode()
+end-quiz()
+submit-answers()
-disconnect()
}
@enduml
```
Note: the function signatures look inconsistent because standard class methods, HTTPS calls and socket calls all appear in the same diagram.
### Sequence diagram showing the communications
```plantuml
@startuml
actor Teacher
actor Student
entity Nginx
entity Frontend
entity Api
entity Docker
entity Database
group Quiz Creation
Teacher -> Frontend : Create a quizroom
Frontend -> Api : Create a quizroom
Api -> Docker : Create a quizroom
Docker -> QuizRoom **
QuizRoom -> Docker : creation successful
Docker -> Api : Creation Successful
loop every second until healthy or 30s:
Api -> QuizRoom : Checking Health via /health
QuizRoom -> Api : No answer, healthy, or unhealthy
end
Api -> Database : Create Room
Database -> Api : Room created
Api -> Teacher : Route to room socket
end
group Quiz Joining:
Teacher -> Nginx : Join Room
Nginx -> Api : Get room infos from id
Api -> Nginx : Ip:port of room
Nginx -> QuizRoom: Give teacher's connexion
Student -> Frontend: Join Room X
Frontend -> Nginx : Join Room X
Nginx -> Api : Get room infos from id
Api -> Nginx : Ip:port of room
Nginx -> QuizRoom: Give student's connexion
QuizRoom -> QuizRoom : Give Quiz ... (Multiple actions)
Student -> QuizRoom: Disconnect
Teacher -> QuizRoom: Disconnect
end
group QuizManagement (Every 10 seconds)
Api -> QuizRoom : Checking number of people in the room
QuizRoom -> Api : Number of people (0) or Unhealthy
Api -> Database : Mark room to deletion
end
group Quiz Deletion (Every 30 seconds)
Api -> Database : Give all rooms marked for deletion
Database -> Api : rooms
Api -> Docker : delete rooms
Docker -> QuizRoom : delete
Docker -> Api : Deleted
end
@enduml
```
## API
<swagger-ui src="salle-de-quiz-swagger.json"/>
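For a quick check without the Swagger UI, the endpoints documented above can be called directly. A sketch using the "via npm" server URL from the OpenAPI document and assuming a valid JWT in the `JWT_TOKEN` environment variable:

```js
// Illustrative call against the Room API documented above.
const token = process.env.JWT_TOKEN; // assumed to hold a valid bearer token

async function listRooms() {
  const res = await fetch('http://localhost:3000/api/room', {
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) throw new Error(`Unexpected status ${res.status}`);
  return res.json(); // array of Room objects (id, name, host, nbStudents, mustBeCleaned)
}

listRooms().then((rooms) => console.log(rooms));
```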

View file

@ -0,0 +1,77 @@
# Deployment documentation with Ansible
This guide explains how to use **Ansible** to deploy the **ÉvalueTonSavoir** project easily.
## Prerequisites
### System requirements
- A computer running **Linux** or **Mac**.
- On **Windows**, install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) to run an Ubuntu environment.
### Installing Ansible
1. **On Ubuntu (or WSL2)**:
Use the `apt` package manager:
```bash
sudo apt update
sudo apt install ansible-core
```
2. **Other systems**:
See the [official Ansible documentation](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for the steps specific to your system.
### Installing Docker and Docker Compose
- Follow the [official Docker documentation](https://docs.docker.com/get-docker/) to install Docker.
- Docker Compose is included as a Docker plugin in recent versions.
## Downloading the required files
1. Clone the Git repository containing the Ansible files:
```bash
git clone https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir
```
2. Navigate to the `ansible` directory:
```bash
cd EvalueTonSavoir/ansible
```
## Deploying with Ansible
### Deployment command
To deploy the application, run the following command in the directory containing the `deploy.yml` file:
```bash
ansible-playbook -i inventory.ini deploy.yml
```
### Structure of the files used
- **`inventory.ini`**: Defines the deployment targets. By default, it is configured for a local deployment.
- **`deploy.yml`**: Playbook containing the instructions to install, configure and deploy the application.
### Steps performed by Ansible
1. **Dependency installation**:
   - Checks for and installs Docker if needed.
2. **Service startup**:
   - Downloads the `docker-compose.yaml` file from the GitHub repository.
   - Starts the services defined with Docker Compose.
3. **Container verification**:
   - Checks that the containers are running and working correctly.
## Verifying the deployment
Once the playbook has run, Ansible:
1. Installs Docker and its dependencies.
2. Downloads and configures the project.
3. Starts the services with Docker Compose.
4. Checks that the services are reachable locally.
To test the application, run the following command:
```bash
curl http://localhost:8080
```
A `200 OK` response code indicates that the deployment succeeded.
---
## Summary
Deploying with **Ansible** simplifies configuration management and the installation of the dependencies required by
the **ÉvalueTonSavoir** project. With this method, you can quickly deploy the application in a local environment
while keeping its configuration consistent.

View file

@ -0,0 +1,63 @@
## Prerequisites
- Make sure Node.js is installed by downloading the latest version from [https://nodejs.org/en](https://nodejs.org/en).
- Then make sure you have access to a MongoDB development server
> For more information about the database, see the documentation [[here|Base-de-données]]
- Clone the project with the following command:
```
git clone https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir.git
```
## Step 1 - Starting the backend
1. Navigate to the project directory with the following command:
```
cd .\EvalueTonSavoir\server\
```
2. Make sure to create the .env file and add the appropriate parameters to it. You can use the
.env.example file as a reference for the required parameters.
[[See the configuration documentation here|Configurations]]
3. Install the dependencies with the following command:
```
npm install
```
4. Start the server with the following command:
```
npm run dev
```
5. Open your browser and go to the URL shown in the console (for example, http://localhost:4400).
## Step 2 - Starting the frontend
1. Navigate to the project directory with the following command:
```
cd .\EvalueTonSavoir\client\
```
> [!WARNING]
> Make sure the backend is running before starting the frontend. \
> Also note the server URL for the `.env` file.
2. Make sure to create the .env file and add the appropriate parameters to it. You can use the
.env.example file as a reference for the required parameters.
[[See the configuration documentation here|Configurations]]
3. Install the dependencies with the following command:
```
npm install
```
4. Start the frontend with the following command:
```
npm run dev
```
5. Open your browser and go to the URL shown in the console (for example, http://localhost:5173/).

View file

@ -0,0 +1,61 @@
# Deployment documentation with OpenTofu
This guide explains how **OpenTofu** is used to deploy the **ÉvalueTonSavoir** project easily.
## Deployment
### Steps to perform the deployment
To deploy with OpenTofu, simply follow the steps in the [README.md](https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir/blob/main/opentofu/README.md) file.
### Structure of the files used for the Azure deployment
- **`app.tf`**: Defines the configuration of the virtual machine that runs the application.
- **`database.tf`**: Defines the database configuration.
- **`main.tf`**: Defines the provider used for the deployment, in this case Azure.
- **`network.tf`**: Defines the network configuration and the network security rules.
- **`resource_group.tf`**: Defines the configuration of the resource group in Azure.
- **`storage.tf`**: Defines the configuration used to store and access the auth_config.json file.
- **`terraform.tfvars`**: Defines the values of the variables used during the deployment.
- **`variables.tf`**: Defines all the variables used during the deployment.
### Steps performed by OpenTofu
1. **Creation of the network elements**:
   - Creation of a virtual network.
   - Creation of a subnet.
   - Creation of a public IP address.
   - Creation of a network security group.
   - Creation of a network interface.
2. **Creation of the database**:
   - Creation of the database server.
   - Creation of the database (a collection, since MongoDB is used).
3. **Creation of the virtual machine**:
   - Creation of the virtual machine.
   - Installation of Docker.
   - Download of the `docker-compose.yaml` file from the GitHub repository.
   - Startup of the application with the `docker-compose.yaml` file.
## Summary
Deploying with **OpenTofu** simplifies the management of everything needed to deploy the **ÉvalueTonSavoir** project
in the cloud. With this method, you can quickly and easily deploy the application in a cloud environment.
## Sequence diagram
```plantuml
@startuml
actor Administrator
participant "Control Machine" as control_machine
participant "Azure" as azure
Administrator -> control_machine: "Se connecte à Azure"
Administrator -> control_machine: "Lancer le déploiement avec OpenTofu"
control_machine -> azure: "Crée les éléments réseaux"
control_machine -> azure: "Crée la base de données"
control_machine -> azure: "Crée la machine virtuelle qui exécute l'application"
control_machine <- azure: "OpenTofu retourne le résultat (success/échec)"
Administrator <- control_machine: "OpenTofu retourne le résultat (success/échec)"
@enduml
```

View file

@ -0,0 +1,230 @@
## Introduction
We chose to run the components of this application with Docker, because it simplifies the management of the
application processes.
Here is a deployment diagram explaining how the components relate to each other and how the Docker images are built
and deployed to a server.
```plantuml
@startuml
skinparam style strictuml
skinparam component {
BackgroundColor<<Container>> LightBlue
BackgroundColor<<Image>> lightgreen
}
node "evalsa.etsmtl.ca" {
artifact "docker-compose.yml" as compose
node "Docker" as docker {
[evaluetonsavoir-routeur\n(nginx)] <<Container>> as ROC
[evaluetonsavoir-frontend\n(vite + TypeScript React)] <<Container>> as FEC
component "evaluetonsavoir-backend\n(Express, Javascript)" <<Container>> as BEC {
port API_REST
port SOCKET_SALLE
}
}
database "MongoDB" as BD
BD -- BEC
}
node "Docker hub" {
component evaluetonsavoir-routeur <<image>> as RO {
}
component evaluetonsavoir-frontend <<image>> as FE {
}
component evaluetonsavoir-backend <<image>> as BE {
}
}
node "GitHub" {
artifact "routeur-deploy.yml" <<action>> as RO_D
artifact "backend-deploy.yml" <<action>> as BE_D
artifact "frontend-deploy.yml" <<action>> as FE_D
}
BE <-- BE_D : on commit
FE <-- FE_D
RO <-- RO_D
BEC <.. BE : "pull à 5h du matin"
FEC <.. FE
ROC <.. RO
node "Navigateur moderne\n(Windows/Android)" as browser {
[React App] as RA_NAV
}
RA_NAV <.. FEC : chargée à partir des pages web
RA_NAV ..> API_REST : API REST
RA_NAV <..> SOCKET_SALLE : WebSocket
@enduml
```
## Prerequisites
The STI provided us with a server with the following specifications:
- Ubuntu 22.04 LTS
- CPU: 4 cores
- RAM: 8 GB
- HDD: 100 GB
- SSL certificate
The STI already performed the initial configuration of the machine according to their server setup standards, to ensure
proper maintenance and security within their infrastructure. This configuration includes a non-root user.
You will also need a Docker Hub account, or you can simply open a PR on the main project and it will be deployed
automatically.
## Step 1 - Installing Docker
Log in with your ETS credentials:
```
ssh <email>@<IP>
```
First, update your existing package list:
```
sudo apt update
```
Next, install a few prerequisite packages that let apt use packages over HTTPS:
> [!WARNING]
> If you see the following error, STOP. Contact the STI to resolve the problem. \
> `Waiting for cache lock: Could not get lock /var/lib/dpkg/lock-frontend. It is held by process 10703 (apt)`
```
sudo apt install apt-transport-https ca-certificates curl software-properties-common
```
Add the official Docker repository's GPG key to your system:
```
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
```
Add the Docker repository to the APT sources:
```
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
```
Update your package list again so that the new repository is recognized:
```
sudo apt update
```
Make sure you are about to install from the Docker repository rather than from the default Ubuntu repository:
```
apt-cache policy docker-ce
```
You will see output like the following, although the Docker version number may differ:
```Output
docker-ce:
Installed: (none)
Candidate: 5:26.0.0-1~ubuntu.22.04~jammy
Version table:
5:26.0.0-1~ubuntu.22.04~jammy 500
500 https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages
5:25.0.5-1~ubuntu.22.04~jammy 500
500 https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages
...
```
Install Docker:
```
sudo apt install docker-ce
```
Check that Docker is running:
```
sudo systemctl status docker
```
The output should look similar to the following, showing that the service is active and running:
```Output
● docker.service - Docker Application Container Engine
Loaded: loaded (/lib/systemd/system/docker.service; enabled; vendor preset: enabled)
Active: active (running) since Fri 2024-04-05 13:20:12 EDT; 1min 24s ago
TriggeredBy: ● docker.socket
Docs: https://docs.docker.com
Main PID: 19389 (dockerd)
Tasks: 10
Memory: 28.7M
CPU: 172ms
CGroup: /system.slice/docker.service
└─19389 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
...
```
> [!NOTE]
> If Docker is not running, e.g. you see:
> ```
> ○ docker.service - Docker Application Container Engine
> Loaded: loaded (/lib/systemd/system/docker.service; enabled; vendor preset: enabled)
> Active: inactive (dead)
> ```
> You need to start Docker:
> ```
> sudo systemctl start docker
> ```
## Step 2 - Installing Docker Compose
Create a Docker Compose installation directory:
```
mkdir -p ~/.docker/cli-plugins/
```
Download Docker Compose:
```
curl -SL https://github.com/docker/compose/releases/download/v2.26.1/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose
```
Next, set the correct permissions so that the docker compose command is executable:
```
chmod +x ~/.docker/cli-plugins/docker-compose
```
To verify that the installation succeeded, you can run:
```
docker compose version
```
## Step 3 - Adding our project
Start by creating a new directory in your home folder:
```
mkdir ~/EvalueTonSavoir
```
Then move into the directory:
```
cd ~/EvalueTonSavoir
```
Create a `docker-compose.yaml` file from the GitHub repository:
```
curl -SL https://raw.githubusercontent.com/ets-cfuhrman-pfe/EvalueTonSavoir/main/docker-compose.yaml -o docker-compose.yaml
```
> [!NOTE]
> Before continuing, note that it is crucial to update the environment variables in this file,
> because the current values are generic placeholders. Make sure to customize these variables to the specific needs
> of your environment before running it.
With the docker-compose.yml file in place, you can now run Docker Compose to start your environment:
```
sudo docker compose up -d
```
Check that the services are running:
```
sudo docker ps -a
```
## Conclusion
Congratulations! You have now successfully configured and launched EvalueTonSavoir on your server, ready to be used.

View file

@ -0,0 +1,28 @@
# About
## Running the documentation
To run the documentation, install Python and move into the documentation folder.
Then install the dependencies with `pip install -r requirements.txt`.
To start development mode, run `python -m mkdocs serve`.
To speed things up and avoid "rate-limiting" errors, it is preferable to use a PlantUML Docker image. To do so, use the following command:
`docker run -d --name plantuml -p 8080:8080 plantuml/plantuml-server:tomcat`
## Deployment
The documentation is deployed automatically by the `create-docs.yaml` GitHub action.
It checks out the repo and follows the same steps as "running the documentation",
with one difference: it uses `build` instead of `serve`, then publishes with the [`ghp-import`](https://github.com/c-w/ghp-import) tool.
The page is pushed to the [`gh-pages`](https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir/tree/gh-pages) branch and then published as a [GitHub Page](https://pages.github.com/).
## Themes and plugins
If you add plugins, please update the `requirements.txt` file.
The documentation uses [MkDocs](https://www.mkdocs.org/) with the [Material theme](https://squidfunk.github.io/mkdocs-material/). Many features, such as code blocks, can be enabled.
The documentation is available here: [https://squidfunk.github.io/mkdocs-material/reference/code-blocks/](https://squidfunk.github.io/mkdocs-material/reference/code-blocks/)
## Alternative way to run it (virtual environment)
If you have a problem with your environment and need a virtual environment, run
`python -m venv .venv` in the documentation folder and activate it with the activate script (which varies
depending on your shell): `.venv\Scripts\activate` on Windows, `source .venv/bin/activate` otherwise.
You can then continue with the other steps.

View file

@ -0,0 +1,22 @@
## About
This project is a React user interface for our application.
## GIFT text format render (source code)
The original code was developed to create a VS Code extension supporting the GIFT text format.
The code can be found here: [https://codesandbox.io/s/gift-templates-iny09](https://codesandbox.io/s/gift-templates-iny09)
We decided to reuse this code because it provides a preview close to what quizzes look like in Moodle,
a platform that is well known at the École de Technologie Supérieure (ÉTS).
To reuse the code, we had to install the following NPM packages (see the sketch below for how they fit together):
- [katex](https://www.npmjs.com/package/katex): A fast, easy-to-use JavaScript library for TeX math rendering on the web.
- [marked](https://www.npmjs.com/package/marked): A markdown parser and compiler built for speed.
- [nanoid](https://www.npmjs.com/package/nanoid): A tiny (108 bytes), secure, URL-friendly, unique string ID generator for JavaScript.
- [gift-pegjs](https://www.npmjs.com/package/gift-pegjs): A GIFT parser for JavaScript using PEG.js.
- [@types/katex](https://www.npmjs.com/package/@types/katex): TypeScript definitions for katex.
- [@types/marked](https://www.npmjs.com/package/@types/marked): TypeScript definitions for marked.
- [@types/nanoid](https://www.npmjs.com/package/@types/nanoid): TypeScript definitions for nanoid.
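A minimal sketch of how these packages fit together, assuming `gift-pegjs` exposes a `parse` function and that parsed questions carry a `stem` text field; the exact shapes and names are assumptions, not the project's actual rendering code:

```js
// Illustrative sketch only: parse GIFT text and render question stems to HTML.
import { parse } from 'gift-pegjs'; // GIFT parser
import { marked } from 'marked';    // Markdown -> HTML
import { nanoid } from 'nanoid';    // unique ids for the rendered questions

export function renderQuiz(giftText) {
  // `parse` is assumed to return an array of question objects.
  return parse(giftText).map((question) => ({
    id: nanoid(),
    type: question.type,
    html: marked.parse(question.stem?.text ?? ''),
  }));
}

console.log(renderQuiz("Grant is buried in Grant's tomb.{FALSE}"));
```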

View file

@ -0,0 +1,83 @@
# High-level structure
## Purpose of the project
ÉvalueTonSavoir was created because of the high cost of the enterprise versions of similar software such as Socrative and
Kahoot. The main goal is to have a self-hosted platform that is well integrated with the systems already in place in
educational institutions.
## Requirements
The goal of the project is to provide a free and open tool to improve learning, with the following features:
- Allow teaching staff to create quizzes
- Allow teachers to collect quiz results
- Allow students to take these quizzes
- Allow students to get feedback
To keep the integration effort low for teaching staff:
- The [`GIFT`](https://docs.moodle.org/405/en/GIFT_format) format already present in Moodle must be used
- Staff and students must be able to authenticate with the school portal
- Starting a quiz must be fast and efficient.
To make mass deployment easier:
- The software must be easy to deploy on local machines
- The software must be easy to deploy in the cloud
- The software must interconnect with the existing infrastructure
- The software must be performant and reliable
## Current architecture
```plantuml
@startuml
package Proxy{
component Nginx
}
package App{
component Frontend
component Backend
database MongoDb
}
cloud Provider{
component QuizRoom
}
Nginx --down-> Backend
Nginx --down-> Frontend
Nginx --down-> Provider
Backend --right-> MongoDb
Backend --up-> Nginx
Frontend --up-> Nginx
@enduml
```
### Technical details
The diagram above is greatly simplified, because all of the components are independent. This means that each part could
be deployed on a different server and still work, which makes it easy to distribute the workload across several servers.
The Nginx proxy hides the separation between the backend and the frontend by bringing both parts together under the same URL.
It is also responsible for routing socket calls to their internal machine in the provider.
The frontend serves the visual part of the application.
The backend handles all of the following services:
- User management
- Quiz management
- Media management
- Room management
### Links to further details
- [Authentication management](./backend/auth.md)
- [Quiz room management](./backend/salle-de-quiz.md)

Binary file not shown.


View file

@ -0,0 +1,79 @@
# Load Tests
To test how the project scales and performs, a **load test** is included in `test/stressTest`. It is designed specifically for **evalue-ton-savoir**, with a focus on server-client and client-client communications.
---
## Routes used on the quizRoom
- **`get-usage`**: Retrieves the resource usage of the containers on the network.
- **`message-from-teacher`**: Forwards messages from teachers to students.
- **`message-from-student`**: Forwards messages from students to teachers.
---
## How it works
1. **Authentication**: Retrieves a token from the backend API.
2. **Setup**: Creates the quiz rooms and connects one teacher to each room.
3. **Student connection**: Connects the students to the rooms according to the parameters.
4. **Simulation**: Simulated messages between teachers and students (see the sketch below).
5. **Data collection**: Collects the resource metrics for analysis.
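A minimal sketch of the simulation step for a single student, assuming `socket.io-client` and the events listed above; the URL, the `join-room` payload and the message shapes are assumptions, not the test's actual code:

```js
// Illustrative sketch of one simulated student in the load test.
const { io } = require('socket.io-client');

function simulateStudent(baseUrl, roomId) {
  const socket = io(baseUrl, { transports: ['websocket'] });
  socket.on('connect', () => {
    socket.emit('join-room', { roomId, username: `student-${socket.id}` });
    // Reply to every teacher message to exercise client-client traffic.
    socket.on('message-from-teacher', () =>
      socket.emit('message-from-student', { roomId, text: 'ack' })
    );
  });
  return socket;
}

simulateStudent('http://localhost', '000000');
```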
---
## Running the test
The commands must be run from: `/test/stressTest`
### Directly
```bash
node main.js
```
- Node.js must be installed.
- Edit the variables in main.js.
### Docker
```bash
docker-compose up
```
- Docker must be installed.
- Set up a .env file.
## Environment Variables
The variables are defined in a `.env` file:
- **BASE_URL**: URL to test.
- **USER_EMAIL**, **USER_PASSWORD**: Credentials used to create and manage the rooms.
- **NUMBER_ROOMS**: Number of rooms.
- **USERS_PER_ROOM**: Number of students per room.
### Optional Variables
- **MAX_MESSAGES_ROUND**: Maximum number of messages per cycle.
- **CONVERSATION_INTERVAL**: Delay (ms) between messages.
- **MESSAGE_RESPONSE_TIMEOUT**: Delay (ms) before a message is considered unanswered.
- **BATCH_DELAY**: Delay (ms) between batch sends.
- **BATCH_SIZE**: Size of the message batches.
---
## Collected Results
### Metrics
- **Rooms created / failed**
- **Users connected / failed**
- **Messages attempted, sent, received**
### Reports
- **JSON**: For automated analysis.
- **Text report**: Human-readable summary.
- **Charts** *(via ChartJS)*:
    - **CPU**, **memory**, **load**.
### Example chart:
![Example chart](./test-charge-output.png)

View file

@ -0,0 +1,23 @@
# About
EvalueTonSavoir is an open-source, self-hosted platform that continues the development of the code from [https://github.com/ETS-PFE004-Plateforme-sondage-minitest](https://github.com/ETS-PFE004-Plateforme-sondage-minitest). This minimalist platform is designed as a learning and teaching tool, offering a simple and effective way to create quizzes using the GIFT format, similar to Moodle.
## Key features
* Open source and self-hosted: Own and control your data by deploying the platform on your own infrastructure.
* GIFT compatibility: Create quizzes easily using the GIFT format, allowing seamless integration with other learning systems.
* Minimalist and efficient: A bare-bones approach that keeps things simple and easy to use, focusing on the essentials of learning.
## Contributing
There is currently no established model for contributions. If you notice something missing or think an improvement is possible, feel free to open an issue and/or a PR.
## Useful links
* [Original Frontend repository](https://github.com/ETS-PFE004-Plateforme-sondage-minitest/ETS-PFE004-EvalueTonSavoir-Frontend)
* [Original Backend repository](https://github.com/ETS-PFE004-Plateforme-sondage-minitest/ETS-PFE004-EvalueTonSavoir-Backend)
* [Documentation (Wiki)](https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir/wiki)
## License
EvalueTonSavoir is open-sourced and licensed under the [MIT License](/LICENSE).

View file

@ -0,0 +1,10 @@
document$.subscribe(({ body }) => {
renderMathInElement(body, {
delimiters: [
{ left: "$$", right: "$$", display: true },
{ left: "$", right: "$", display: false },
{ left: "\\(", right: "\\)", display: false },
{ left: "\\[", right: "\\]", display: true }
],
})
})

View file

@ -0,0 +1,84 @@
> [!NOTE]
> Each project contains a `.env.example` file providing configuration examples.
> Be sure to consult this file for the parameters your configuration needs.

> [!NOTE]
> These are all the configuration options. Feel free to open a PR if you see any that are missing.
## Backend Configuration Options
| Environment Variable | Description | Example | Optional |
|---|---|---|---|
| `PORT` | The port the application runs on | 4400 | no |
| `MONGO_URI` | The connection string used to connect to the MongoDB database | `mongodb://localhost:27017` or `mongodb://127.0.0.1:27017` (the former can cause trouble on Windows depending on hosts files) | no |
| `MONGO_DATABASE` | The desired name for the database | evaluetonsavoir | no |
| `EMAIL_SERVICE` | The service used for e-mails | gmail | no |
| `SENDER_EMAIL` | The e-mail address used for sending | monadresse@gmail.com | no |
| `EMAIL_PSW` | The password of the e-mail address | 'monmotdepasse' | no |
| `JWT_SECRET` | The secret used to manage JWTs | monsecretJWT | no |
| `FRONTEND_URL` | URL of the frontend, including the port | http://localhost:5173 | no |
## Frontend Configuration Options
| Environment Variable | Description | Example | Optional |
|---|---|---|---|
| `VITE_BACKEND_URL` | URL of the backend, including the port | http://localhost:4400 | no |
| `VITE_AZURE_BACKEND_URL` | URL of the backend, including the port | http://localhost:4400 | no |
## Router Configuration Options
| Environment Variable | Description | Example | Optional (has default) |
|---|---|---|---|
| `PORT` | Port number NGINX listens on | 80 | yes |
| `FRONTEND_HOST` | Host of the Frontend | frontend | yes |
| `FRONTEND_PORT` | Port of the Frontend | 5173 | yes |
| `BACKEND_HOST` | Host of the Backend | backend | yes |
| `BACKEND_PORT` | Port of the Backend | 3000 | yes |
## Quiz Room Configuration Options
| Environment Variable | Description | Example | Optional (has default) |
|---|---|---|---|
| `PORT` | Port number the room listens on | 4500 | yes |
| `ROOM_ID` | Room number | 000000 | yes |
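For reference, a minimal sketch of how a backend module might read these variables with `process.env`, using the example values above as fallbacks; this is illustrative and not the project's actual configuration code:

```js
// Illustrative only: load the backend configuration from the environment,
// falling back to the example values documented in the tables above.
const config = {
  port: Number(process.env.PORT ?? 4400),
  mongoUri: process.env.MONGO_URI ?? 'mongodb://localhost:27017',
  mongoDatabase: process.env.MONGO_DATABASE ?? 'evaluetonsavoir',
  emailService: process.env.EMAIL_SERVICE ?? 'gmail',
  senderEmail: process.env.SENDER_EMAIL,
  emailPassword: process.env.EMAIL_PSW,
  jwtSecret: process.env.JWT_SECRET,
  frontendUrl: process.env.FRONTEND_URL ?? 'http://localhost:5173',
};

module.exports = config;
```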
## HealthChecks
### Frontend
```yaml
healthcheck:
  test: ["CMD-SHELL", "curl -f http://localhost:$${PORT} || exit 1"]
  interval: 5s
  timeout: 10s
  start_period: 5s
  retries: 6
```
### Backend
```yaml
healthcheck:
  test: ["CMD-SHELL", "curl -f http://localhost:$${PORT}/health || exit 1"]
  interval: 5s
  timeout: 10s
  start_period: 5s
  retries: 6
```
### Quiz Room
```yaml
healthcheck:
  test: ["CMD", "/usr/src/app/healthcheck.sh"]
  interval: 5s
  timeout: 10s
  start_period: 5s
  retries: 6
```
### Router
```yaml
healthcheck:
  test: ["CMD-SHELL", "wget --spider http://0.0.0.0:$${PORT}/health || exit 1"]
  interval: 5s
  timeout: 10s
  start_period: 5s
  retries: 6
```
### MongoDb
```yaml
healthcheck:
  test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
  interval: 10s
  timeout: 5s
  retries: 3
  start_period: 20s
```

View file

@ -0,0 +1,18 @@
# Deployment
The recommended deployment methods are Ansible and OpenTofu.
Ansible is used for deployment on a local server, OpenTofu for the cloud.
## Ansible
Deployment with Ansible is a simplified deployment.
All you need is a Linux/Mac computer, or one that can run [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
in the case of Windows. Then use your package manager (usually apt) to install the `ansible-core` package;
other methods are described in the [official Ansible documentation](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html).
Once that is done, you can download [the required files](https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir/ansible) and run the command
`ansible-playbook -i inventory.ini deploy.yml`
## OpenTofu
Deployment with OpenTofu is a bit more complex, but it lets you host the solution on your preferred cloud.
Simply [install OpenTofu](https://opentofu.org/docs/intro/install/) and download [the required files](https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir/opentofu).
A README is included to help you set up your server cluster.

79
documentation/mkdocs.yml Normal file
View file

@ -0,0 +1,79 @@
site_name: EvalueTonSavoir
repo_url: https://github.com/ets-cfuhrman-pfe/EvalueTonSavoir
edit_uri: edit/main/documentation/docs
theme:
language: fr
icon:
repo: fontawesome/brands/github
name: material
palette:
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
primary: red
accent: pink
toggle:
icon: material/brightness-7
name: Mode sombre
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
primary: red
accent: pink
toggle:
icon: material/brightness-4
name: Mode clair
features:
- content.code.copy
- content.code.select
- content.code.annotate
- navigation.instant
- navigation.instant.progress
- navigation.tracking
- content.action.edit
highlightjs: true
hljs_languages:
- javascript
- typescript
- css
- react
- yaml
- latex
- katex
- gift
use_directory_urls: false
plugins:
- search
- offline
- plantuml:
puml_url: !ENV [PUMLURL,'http://localhost:8080'] # dev
puml_keyword: plantuml
theme:
light: material/red-light
dark: material/red-dark
- swagger-ui-tag:
docExpansion: "list"
tryItOutEnabled: false
markdown_extensions:
- pymdownx.highlight:
anchor_linenums: true
line_spans: __span
pygments_lang_class: true
- pymdownx.inlinehilite
- pymdownx.snippets
- pymdownx.superfences
- pymdownx.arithmatex:
generic: true
extra_javascript:
- javascripts/katex.js
- https://unpkg.com/katex@0/dist/katex.min.js
- https://unpkg.com/katex@0/dist/contrib/auto-render.min.js
extra_css:
- https://unpkg.com/katex@0/dist/katex.min.css

View file

@ -0,0 +1 @@
3.12

View file

@ -0,0 +1,7 @@
mkdocs
mkdocs[i18n]
mkdocs_puml
mkdocs-material
Pygments
ghp-import
mkdocs-swagger-ui-tag

5
nginx/.env.example Normal file
View file

@ -0,0 +1,5 @@
PORT=80
FRONTEND_HOST=frontend
FRONTEND_PORT=5173
BACKEND_HOST=backend
BACKEND_PORT=3000

View file

@ -1,3 +1,90 @@
FROM nginx
# Stage 1: Build stage
FROM nginx:1.27-alpine AS builder
# Install required packages
RUN apk add --no-cache nginx-mod-http-js nginx-mod-http-keyval
COPY ./default.conf /etc/nginx/conf.d/default.conf
# Stage 2: Final stage
FROM alpine:3.19
# Install gettext for envsubst and other dependencies
RUN apk add --no-cache \
gettext \
curl \
nginx-mod-http-js \
nginx-mod-http-keyval \
pcre2 \
ca-certificates \
pcre \
libgcc \
libstdc++ \
zlib \
libxml2 \
libedit \
geoip \
libxslt
# Create base nginx directory
RUN mkdir -p /etc/nginx
# Copy Nginx and NJS modules from builder
COPY --from=builder /usr/sbin/nginx /usr/sbin/
COPY --from=builder /usr/lib/nginx/modules/ /usr/lib/nginx/modules/
RUN rm -rf /etc/nginx/*
COPY --from=builder /etc/nginx/ /etc/nginx/
COPY --from=builder /usr/lib/nginx/ /usr/lib/nginx/
# Setup directories and permissions
RUN mkdir -p /var/cache/nginx \
&& mkdir -p /var/log/nginx \
&& mkdir -p /etc/nginx/conf.d \
&& mkdir -p /etc/nginx/njs \
&& mkdir -p /etc/nginx/templates \
&& chown -R nginx:nginx /var/cache/nginx \
&& chown -R nginx:nginx /var/log/nginx \
&& chown -R nginx:nginx /etc/nginx \
&& touch /var/run/nginx.pid \
&& chown nginx:nginx /var/run/nginx.pid \
&& chmod 777 /var/log/nginx
# Copy necessary libraries from builder
COPY --from=builder /usr/lib/libxml2.so* /usr/lib/
COPY --from=builder /usr/lib/libexslt.so* /usr/lib/
COPY --from=builder /usr/lib/libgd.so* /usr/lib/
COPY --from=builder /usr/lib/libxslt.so* /usr/lib/
# Modify nginx.conf to load modules
RUN echo 'load_module modules/ngx_http_js_module.so;' > /tmp/nginx.conf && \
cat /etc/nginx/nginx.conf >> /tmp/nginx.conf && \
mv /tmp/nginx.conf /etc/nginx/nginx.conf
# Copy configurations
COPY templates/default.conf /etc/nginx/templates/
COPY njs/main.js /etc/nginx/njs/
COPY entrypoint.sh /entrypoint.sh
RUN dos2unix /entrypoint.sh
ENV PORT=80 \
FRONTEND_HOST=frontend \
FRONTEND_PORT=5173 \
BACKEND_HOST=backend \
BACKEND_PORT=3000
# Set final permissions
RUN chmod +x /entrypoint.sh && \
chown -R nginx:nginx /etc/nginx && \
chown -R nginx:nginx /var/log/nginx && \
chown -R nginx:nginx /var/cache/nginx && \
chmod 755 /etc/nginx && \
chmod 777 /etc/nginx/conf.d && \
chmod 644 /etc/nginx/templates/default.conf && \
chmod 644 /etc/nginx/conf.d/default.conf
# Switch to nginx user
USER nginx
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD wget -q --spider http://0.0.0.0:${PORT}/health || exit 1
# Start Nginx using entrypoint script
# CMD [ "/bin/sh","-c","sleep 3600" ] # For debugging
ENTRYPOINT [ "/entrypoint.sh" ]

View file

@ -1,31 +0,0 @@
upstream frontend {
server frontend:5173;
}
upstream backend {
server backend:3000;
}
server {
listen 80;
location /api {
rewrite /backend/(.*) /$1 break;
proxy_pass http://backend;
}
location /socket.io {
rewrite /backend/(.*) /$1 break;
proxy_pass http://backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
proxy_hide_header 'Access-Control-Allow-Origin';
}
location / {
proxy_pass http://frontend;
}
}

15
nginx/entrypoint.sh Normal file
View file

@ -0,0 +1,15 @@
#!/bin/sh
# entrypoint.sh
# We are already running as nginx user
envsubst '${PORT} ${FRONTEND_HOST} ${FRONTEND_PORT} ${BACKEND_HOST} ${BACKEND_PORT}' \
< /etc/nginx/templates/default.conf \
> /etc/nginx/conf.d/default.conf
# Adds logs for docker
ln -sf /dev/stdout /var/log/nginx/access.log
ln -sf /dev/stderr /var/log/nginx/error.log
ln -sf /dev/stderr /var/log/nginx/debug.log
# Start nginx
exec nginx -g "daemon off;"

97
nginx/njs/main.js Normal file
View file

@ -0,0 +1,97 @@
function get_cache_dict(r) {
return '';
}
function getCachedData(r, key) {
try {
const cached = ngx.shared.cache.get(key);
if (cached) {
const data = JSON.parse(cached);
const now = Date.now();
// 2 minutes cache - let game rooms rotate
if (now - data.timestamp < 120000) {
r.error(`Debug: Cache hit for ${key}, age: ${(now - data.timestamp)/1000}s`);
return data.value;
}
r.error(`Debug: Cache expired for ${key}, age: ${(now - data.timestamp)/1000}s`);
}
return null;
} catch (error) {
r.error(`Cache read error: ${error}`);
return null;
}
}
function setCachedData(r, key, value) {
try {
const data = {
timestamp: Date.now(),
value: value
};
ngx.shared.cache.set(key, JSON.stringify(data));
r.error(`Debug: Cached ${key}`);
} catch (error) {
r.error(`Cache write error: ${error}`);
}
}
async function fetchRoomInfo(r) {
const cacheKey = `room:${r.variables.room_id}`;
try {
const cachedRoom = getCachedData(r, cacheKey);
if (cachedRoom) {
r.error(`Debug: Room info from cache: ${JSON.stringify(cachedRoom)}`);
return cachedRoom;
}
let res = await r.subrequest('/api/room/' + r.variables.room_id, {
method: 'GET'
});
if (res.status !== 200) {
r.error(`Failed to fetch room info: ${res.status}`);
return null;
}
let room = JSON.parse(res.responseText);
setCachedData(r, cacheKey, room);
r.error(`Debug: Room info fetched and cached: ${JSON.stringify(room)}`);
return room;
} catch (error) {
r.error(`Error fetching/caching room info: ${error}`);
return null;
}
}
export default {
get_cache_dict,
routeWebSocket: async function(r) {
try {
const roomInfo = await fetchRoomInfo(r);
if (!roomInfo || !roomInfo.host) {
r.error(`Debug: Invalid room info: ${JSON.stringify(roomInfo)}`);
r.return(404, 'Room not found or invalid');
return;
}
let proxyUrl = roomInfo.host;
if (!proxyUrl.startsWith('http://') && !proxyUrl.startsWith('https://')) {
proxyUrl = 'http://' + proxyUrl;
}
r.error(`Debug: Original URL: ${r.uri}`);
r.error(`Debug: Setting proxy target to: ${proxyUrl}`);
r.error(`Debug: Headers: ${JSON.stringify(r.headersIn)}`);
r.variables.proxy_target = proxyUrl;
r.internalRedirect('@websocket_proxy');
} catch (error) {
r.error(`WebSocket routing error: ${error}`);
r.return(500, 'Internal routing error');
}
}
};

View file

@ -0,0 +1,81 @@
js_shared_dict_zone zone=cache:10m;
js_import njs/main.js;
js_set $cache_dict main.get_cache_dict;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream frontend {
server ${FRONTEND_HOST}:${FRONTEND_PORT};
}
upstream backend {
server ${BACKEND_HOST}:${BACKEND_PORT};
}
server {
listen ${PORT};
set $proxy_target "";
location /health {
access_log off;
add_header Content-Type text/plain;
return 200 'healthy';
}
location /backend-health {
proxy_pass http://backend/health;
proxy_http_version 1.1;
proxy_set_header Host $host;
access_log off;
}
location /frontend-health {
proxy_pass http://frontend;
proxy_http_version 1.1;
proxy_set_header Host $host;
access_log off;
}
location /api {
proxy_pass http://backend;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
# Game WebSocket routing
location ~/api/room/([^/]+)/socket {
set $room_id $1;
js_content main.routeWebSocket;
}
# WebSocket proxy location
location @websocket_proxy {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Timeouts
proxy_connect_timeout 7m;
proxy_send_timeout 7m;
proxy_read_timeout 7m;
proxy_buffering off;
proxy_pass $proxy_target;
}
location / {
proxy_pass http://frontend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}

44
opentofu/README.md Normal file
View file

@ -0,0 +1,44 @@
# Deployment with OpenTofu
## Microsoft Azure
### Install OpenTofu
https://opentofu.org/docs/intro/install/
### Install the Azure CLI
https://learn.microsoft.com/en-us/cli/azure/install-azure-cli#install
### Log in to Azure and retrieve the Azure subscription id
To log in to Azure, run the following command:
`az login`
With this command, you will select an Azure subscription. Copy the subscription id; you will need it
in the next step.
### Adjust the configuration
Create a **terraform.tfvars** file based on the **terraform.tfvars.example** file in the **azure** directory.
You can change all of the variables used during the deployment in this file.
All of the variables, their descriptions and their default values are available in the **variables.tf** file.
Create an **auth_config.json** file based on the **auth_config.json.example** file in the **opentofu** directory.
The URL is defined as follows: http://<container_group_app_dns>.<location>.cloudapp.azure.com.
By default, the URL is http://evaluetonsavoir.canadacentral.cloudapp.azure.com/
### Start the deployment
To start the deployment, run the following commands:
`cd azure`
`az login`
`tofu init`
`tofu apply`
OpenTofu will then show all of the actions it is going to perform with the configured values.
Enter `yes` to apply these actions and start the deployment.

View file

@ -0,0 +1,35 @@
{
auth: {
passportjs: [
{
provider1: {
type: "oauth",
OAUTH_AUTHORIZATION_URL: "https://www.testurl.com/oauth2/authorize",
OAUTH_TOKEN_URL: "https://www.testurl.com/oauth2/token",
OAUTH_USERINFO_URL: "https://www.testurl.com/oauth2/userinfo/",
OAUTH_CLIENT_ID: "your_oauth_client_id",
OAUTH_CLIENT_SECRET: "your_oauth_client_secret",
OAUTH_ADD_SCOPE: "scopes",
OAUTH_ROLE_TEACHER_VALUE: "teacher-claim-value",
OAUTH_ROLE_STUDENT_VALUE: "student-claim-value",
},
},
{
provider2: {
type: "oidc",
OIDC_CLIENT_ID: "your_oidc_client_id",
OIDC_CLIENT_SECRET: "your_oidc_client_secret",
OIDC_CONFIG_URL: "https://your-issuer.com",
OIDC_ADD_SCOPE: "groups",
OIDC_ROLE_TEACHER_VALUE: "teacher-claim-value",
OIDC_ROLE_STUDENT_VALUE: "student-claim-value",
},
},
],
"simpleauth": {
enabled: true,
name: "provider3",
SESSION_SECRET: "your_session_secret",
},
},
}

67
opentofu/azure/app.tf Normal file
View file

@ -0,0 +1,67 @@
# Create Virtual Machine
resource "azurerm_linux_virtual_machine" "vm" {
name = var.vm_name
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
size = var.vm_size
admin_username = var.vm_user
admin_password = var.vm_password
disable_password_authentication = false
network_interface_ids = [azurerm_network_interface.nic.id]
os_disk {
name = var.vm_os_disk_name
caching = "ReadWrite"
storage_account_type = var.vm_os_disk_type
}
source_image_reference {
publisher = var.vm_image_publisher
offer = var.vm_image_offer
sku = var.vm_image_plan
version = var.vm_image_version
}
custom_data = base64encode(<<-EOT
#!/bin/bash
sudo apt-get update -y
sudo apt-get install -y docker.io
sudo apt-get install -y docker-compose
sudo systemctl start docker
sudo systemctl enable docker
sudo usermod -aG docker ${var.vm_user}
sudo newgrp docker
su - ${var.vm_user} -c '
curl -o auth_config.json \
"https://${azurerm_storage_account.storage_account.name}.file.core.windows.net/${azurerm_storage_share.backend_storage_share.name}/auth_config.json${data.azurerm_storage_account_sas.storage_access.sas}"
curl -L -o docker-compose.yaml ${var.docker_compose_url}
export VITE_BACKEND_URL=http://${var.dns}.${lower(replace(azurerm_resource_group.resource_group.location, " ", ""))}.cloudapp.azure.com
export PORT=${var.backend_port}
export MONGO_URI="${azurerm_cosmosdb_account.cosmosdb_account.primary_mongodb_connection_string}"
export MONGO_DATABASE=${azurerm_cosmosdb_mongo_collection.cosmosdb_mongo_collection.database_name}
export EMAIL_SERVICE=${var.backend_email_service}
export SENDER_EMAIL=${var.backend_email_sender}
export EMAIL_PSW="${var.backend_email_password}"
export JWT_SECRET=${var.backend_jwt_secret}
export SESSION_Secret=${var.backend_session_secret}
export SITE_URL=http://${var.dns}.${lower(replace(azurerm_resource_group.resource_group.location, " ", ""))}.cloudapp.azure.com
export FRONTEND_PORT=${var.frontend_port}
export USE_PORTS=${var.backend_use_port}
export AUTHENTICATED_ROOMS=${var.backend_use_auth_student}
export QUIZROOM_IMAGE=${var.quizroom_image}
docker-compose up -d
'
EOT
)
depends_on = [
azurerm_cosmosdb_mongo_collection.cosmosdb_mongo_collection,
data.azurerm_storage_account_sas.storage_access]
}

View file

@ -0,0 +1,43 @@
resource "azurerm_cosmosdb_account" "cosmosdb_account" {
name = var.cosmosdb_account_name
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
offer_type = "Standard"
kind = "MongoDB"
mongo_server_version = "7.0"
is_virtual_network_filter_enabled = true
virtual_network_rule {
id = azurerm_subnet.subnet.id
}
capabilities {
name = "EnableMongo"
}
consistency_policy {
consistency_level = "Session"
}
geo_location {
failover_priority = 0
location = azurerm_resource_group.resource_group.location
}
depends_on = [azurerm_resource_group.resource_group]
}
resource "azurerm_cosmosdb_mongo_collection" "cosmosdb_mongo_collection" {
name = var.mongo_database_name
resource_group_name = azurerm_resource_group.resource_group.name
account_name = azurerm_cosmosdb_account.cosmosdb_account.name
database_name = var.mongo_database_name
index {
keys = ["_id"]
unique = true
}
depends_on = [azurerm_cosmosdb_account.cosmosdb_account]
}

14
opentofu/azure/main.tf Normal file
View file

@ -0,0 +1,14 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 4.0"
}
}
required_version = ">= 1.0"
}
provider "azurerm" {
features {}
subscription_id = var.subscription_id
}

87
opentofu/azure/network.tf Normal file
View file

@ -0,0 +1,87 @@
# Create Virtual Network
resource "azurerm_virtual_network" "vnet" {
name = var.vnet_name
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
address_space = ["10.0.0.0/16"]
}
# Create Subnet
resource "azurerm_subnet" "subnet" {
name = var.subnet_name
resource_group_name = azurerm_resource_group.resource_group.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.0.1.0/24"]
service_endpoints = ["Microsoft.AzureCosmosDB"]
}
# Create Public IP Address
resource "azurerm_public_ip" "public_ip" {
name = var.public_ip_name
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
allocation_method = "Static"
domain_name_label = var.dns
}
resource "azurerm_network_security_group" "nsg" {
name = var.nsg_name
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "SSH"
priority = 1000
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = var.nsg_ssh_ip_range
destination_address_prefix = "*"
}
security_rule {
name = "HTTP"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = var.nsg_http_ip_range
destination_address_prefix = "*"
}
security_rule {
name = "HTTPS"
priority = 1002
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "443"
source_address_prefix = var.nsg_https_ip_range
destination_address_prefix = "*"
}
}
# Create Network Interface
resource "azurerm_network_interface" "nic" {
name = var.network_interface_name
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.public_ip.id
}
}
resource "azurerm_network_interface_security_group_association" "example" {
network_interface_id = azurerm_network_interface.nic.id
network_security_group_id = azurerm_network_security_group.nsg.id
}

View file

@ -0,0 +1,5 @@
# Create Resource Group
resource "azurerm_resource_group" "resource_group" {
name = var.resource_group_name
location = var.location
}

74
opentofu/azure/storage.tf Normal file
View file

@ -0,0 +1,74 @@
resource "azurerm_storage_account" "storage_account" {
name = var.config_volume_storage_account_name
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
account_tier = "Standard"
account_replication_type = "LRS"
depends_on = [azurerm_resource_group.resource_group]
}
resource "azurerm_storage_share" "backend_storage_share" {
name = var.backend_storage_share_name
storage_account_name = azurerm_storage_account.storage_account.name
quota = 1
depends_on = [azurerm_storage_account.storage_account]
}
resource "null_resource" "upload_file" {
provisioner "local-exec" {
command = <<EOT
az storage file upload \
--account-name ${azurerm_storage_account.storage_account.name} \
--share-name ${azurerm_storage_share.backend_storage_share.name} \
--source ../auth_config.json \
--path auth_config.json
EOT
}
depends_on = [azurerm_storage_share.backend_storage_share]
}
locals {
# Get the current timestamp (UTC)
current_timestamp = timestamp()
start_time = local.current_timestamp
expiry_time = timeadd(local.current_timestamp, "1h")
}
data "azurerm_storage_account_sas" "storage_access" {
connection_string = azurerm_storage_account.storage_account.primary_connection_string
signed_version = "2022-11-02"
services {
file = true
blob = false
queue = false
table = false
}
resource_types {
object = true
container = false
service = false
}
permissions {
read = true
write = false
delete = false
list = true
add = false
create = false
update = false
process = false
tag = false
filter = false
}
start = local.start_time
expiry = local.expiry_time
depends_on = [null_resource.upload_file]
}

View file

@ -0,0 +1,7 @@
subscription_id = "subscription_id"
backend_session_secret = "secret"
backend_email_sender = "mail@mail.com"
backend_email_password = "password"
backend_jwt_secret = "jwt_secret"
vm_user = "username"
vm_password = "password"

214
opentofu/azure/variables.tf Normal file
View file

@ -0,0 +1,214 @@
variable "subscription_id" {
description = "The azure subscription id"
type = string
}
variable "resource_group_name" {
description = "The name of the resource group"
type = string
default = "evaluetonsavoir"
}
variable "location" {
description = "The location for resources"
type = string
default = "Canada Central"
}
variable "frontend_port" {
description = "The frontend port"
type = number
default = 5173
}
variable "backend_port" {
description = "The backend port"
type = number
default = 3000
}
variable "backend_use_port" {
description = "If true use port in the backend, else no"
type = bool
default = false
}
variable "backend_use_auth_student" {
description = "If true student need to authenticate, else no"
type = bool
default = false
}
variable "backend_session_secret" {
description = "The backend session secret"
type = string
}
variable "backend_email_service" {
description = "The name of the service use for sending email"
type = string
default = "gmail"
}
variable "backend_email_sender" {
description = "The email address used to send email"
type = string
}
variable "backend_email_password" {
description = "The email password"
type = string
}
variable "backend_jwt_secret" {
description = "The secret used to sign the jwt"
type = string
}
variable "backend_storage_share_name" {
description = "The backend volume share name"
type = string
default = "auth-config-share"
}
variable "config_volume_storage_account_name" {
description = "The volume storage account name"
type = string
default = "evaluetonsavoirstorage"
}
variable "mongo_database_name" {
description = "The name of the database"
type = string
default = "evaluetonsavoir"
}
variable "cosmosdb_account_name" {
description = "The name of the cosmosdb account"
type = string
default = "evaluetonsavoircosmosdb"
}
variable "vnet_name" {
description = "The name of the virtual network"
type = string
default = "evaluetonsavoirVnet"
}
variable "subnet_name" {
description = "The name of the subnet"
type = string
default = "evaluetonsavoirSubnet"
}
variable "public_ip_name" {
description = "The name of the public ip"
type = string
default = "evaluetonsavoirPublicIp"
}
variable "nsg_name" {
description = "The name of the network security group"
type = string
default = "evaluetonsavoirnsg"
}
variable "nsg_ssh_ip_range" {
description = "The ip range that can access to the port 22 using the network security group"
type = string
default = "0.0.0.0/0"
}
variable "nsg_http_ip_range" {
description = "The ip range that can access to the port 80 using the network security group"
type = string
default = "0.0.0.0/0"
}
variable "nsg_https_ip_range" {
description = "The ip range that can access to the port 443 using the network security group"
type = string
default = "0.0.0.0/0"
}
variable "network_interface_name" {
description = "The name of the network interface"
type = string
default = "evaluetonsavoirNetworkInterface"
}
variable "dns" {
description = "The dns of the public ip"
type = string
default = "evaluetonsavoir"
}
variable "vm_name" {
description = "The name of the virtual machine"
type = string
default = "evaluetonsavoir"
}
variable "vm_size" {
description = "The size of the virtual machine"
type = string
default = "Standard_B2s"
}
variable "vm_user" {
description = "The username of the virtual machine"
type = string
}
variable "vm_password" {
description = "The password of the virtual machine"
type = string
}
variable "vm_os_disk_name" {
description = "The name of the os disk of the virtual machine"
type = string
default = "evaluetonsavoirOsDisk"
}
variable "vm_os_disk_type" {
description = "The type of the os disk of the virtual machine"
type = string
default = "Standard_LRS"
}
variable "vm_image_publisher" {
description = "The publisher of the image of the virtual machine"
type = string
default = "Canonical"
}
variable "vm_image_offer" {
description = "The id of the image of the virtual machine"
type = string
default = "0001-com-ubuntu-server-jammy"
}
variable "vm_image_plan" {
description = "The plan of the image of the virtual machine"
type = string
default = "22_04-lts"
}
variable "vm_image_version" {
description = "The version of the image of the virtual machine"
type = string
default = "latest"
}
variable "docker_compose_url" {
description = "The url from where the docker compose file is downloaded"
type = string
default = "https://raw.githubusercontent.com/ets-cfuhrman-pfe/EvalueTonSavoir/refs/heads/main/opentofu/docker-compose.yaml"
}
variable "quizroom_image" {
description = "The image of the quiz room"
type = string
default = "ghrc.io/fuhrmanator/evaluetonsavoir-quizroom:latest"
}

View file

@ -0,0 +1,80 @@
services:
frontend:
image: ghcr.io/ets-cfuhrman-pfe/evaluetonsavoir-frontend:latest
container_name: frontend
ports:
- "5173:5173"
environment:
VITE_BACKEND_URL: ${VITE_BACKEND_URL:-http://localhost:3000}
networks:
- quiz_network
restart: always
backend:
image: ghcr.io/ets-cfuhrman-pfe/evaluetonsavoir-backend:latest
container_name: backend
ports:
- "3000:3000"
environment:
PORT: ${PORT:-3000}
MONGO_URI: ${MONGO_URI:-mongodb://mongo:27017/evaluetonsavoir}
MONGO_DATABASE: ${MONGO_DATABASE:-evaluetonsavoir}
EMAIL_SERVICE: ${EMAIL_SERVICE:-gmail}
SENDER_EMAIL: ${SENDER_EMAIL:-infoevaluetonsavoir@gmail.com}
EMAIL_PSW: ${EMAIL_PSW:-'vvml wmfr dkzb vjzb'}
JWT_SECRET: ${JWT_SECRET:-haQdgd2jp09qb897GeBZyJetC8ECSpbFJe}
FRONTEND_URL: ${FRONTEND_URL:-http://localhost:5173}
SESSION_Secret: ${SESSION_Secret:-'lookMomImQuizzing'}
SITE_URL: ${SITE_URL:-http://localhost}
FRONTEND_PORT: ${FRONTEND_PORT:-5173}
USE_PORTS: ${USE_PORTS:-false}
AUTHENTICATED_ROOMS: ${AUTHENTICATED_ROOMS:-false}
QUIZROOM_IMAGE: ${QUIZROOM_IMAGE:-ghcr.io/fuhrmanator/evaluetonsavoir-quizroom:latest}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./auth_config.json:/usr/src/app/serveur/auth_config.json
networks:
- quiz_network
restart: always
quizroom:
image: ghcr.io/ets-cfuhrman-pfe/evaluetonsavoir-quizroom:latest
container_name: quizroom
ports:
- "4500:4500"
depends_on:
- backend
networks:
- quiz_network
restart: always
nginx:
image: ghcr.io/ets-cfuhrman-pfe/evaluetonsavoir-router:latest
container_name: nginx
ports:
- "80:80"
depends_on:
- backend
- frontend
networks:
- quiz_network
restart: always
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- TZ=America/Montreal
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_DEBUG=true
- WATCHTOWER_INCLUDE_RESTARTING=true
- WATCHTOWER_SCHEDULE=0 0 5 * * * # At 5 am everyday
restart: always
networks:
quiz_network:
name: evaluetonsavoir_quiz_network
driver: bridge

2
quizRoom/.dockerignore Normal file
View file

@ -0,0 +1,2 @@
Dockerfile
docker-compose.yml

32
quizRoom/Dockerfile Normal file
View file

@ -0,0 +1,32 @@
# Use the Node base image
FROM node:18 AS quizroom
ENV PORT=4500
ENV ROOM_ID=000000
# Create a working directory
WORKDIR /usr/src/app
# Copy package.json and package-lock.json (if available) and install dependencies
COPY package*.json ./
RUN npm install
# Copy the rest of the source code to the container
COPY . .
# Ensure healthcheck.sh has execution permissions
COPY healthcheck.sh /usr/src/app/healthcheck.sh
RUN chmod +x /usr/src/app/healthcheck.sh
# Build the TypeScript code
RUN npm run build
# Expose WebSocket server port
EXPOSE ${PORT}
# Add healthcheck
HEALTHCHECK --interval=30s --timeout=30s --start-period=30s --retries=3 \
CMD /usr/src/app/healthcheck.sh
# Start the server using the compiled JavaScript file
CMD ["node", "dist/app.js"]

57
quizRoom/app.ts Normal file
View file

@ -0,0 +1,57 @@
import http from "http";
import { Server, ServerOptions } from "socket.io";
import { setupWebsocket } from "./socket/setupWebSocket";
import dotenv from "dotenv";
import express from "express";
import os from "os"; // Import the os module
// Load environment variables
dotenv.config();
const port = process.env.PORT || 4500;
const roomId = process.env.ROOM_ID;
console.log(`I am: /api/room/${roomId}/socket`);
// Create Express app for health check
const app = express();
const server = http.createServer(app);
// Health check endpoint
app.get('/health', (_, res) => {
try {
if (io.engine?.clientsCount !== undefined) {
res.status(200).json({
status: 'healthy',
path: `/api/room/${roomId}/socket`,
connections: io.engine.clientsCount,
uptime: process.uptime()
});
} else {
throw new Error('Socket.io server not initialized');
}
} catch (error: any) {
res.status(500).json({
status: 'unhealthy',
error: error.message
});
}
});
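// The Socket.IO path embeds the room id so that per-room traffic can be routed to this specific container (the nginx router is assumed to match on this path)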
const ioOptions: Partial<ServerOptions> = {
path: `/api/room/${roomId}/socket`,
cors: {
origin: "*",
methods: ["GET", "POST"],
credentials: true,
},
};
const io = new Server(server, ioOptions);
// Initialize WebSocket setup
setupWebsocket(io);
server.listen(port, () => {
console.log(`WebSocket server is running on port ${port}`);
});

View file

@ -0,0 +1,21 @@
version: '3.8'
services:
quizroom:
build:
context: .
args:
- PORT=${PORT:-4500}
ports:
- "${PORT:-4500}:${PORT:-4500}"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- PORT=${PORT:-4500}
- ROOM_ID=${ROOM_ID}
healthcheck:
test: curl -f http://localhost:${PORT:-4500}/health || exit 1
interval: 30s
timeout: 30s
retries: 3
start_period: 30s

2
quizRoom/healthcheck.sh Normal file
View file

@ -0,0 +1,2 @@
#!/bin/bash
curl -f "http://0.0.0.0:${PORT}/health" || exit 1

1595
quizRoom/package-lock.json generated Normal file

File diff suppressed because it is too large Load diff

27
quizRoom/package.json Normal file
View file

@ -0,0 +1,27 @@
{
"name": "quizroom",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"start": "node dist/app.js",
"build": "tsc",
"dev": "ts-node app.ts"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "",
"devDependencies": {
"@types/dockerode": "^3.3.32",
"@types/express": "^5.0.0",
"ts-node": "^10.9.2",
"typescript": "^5.6.3"
},
"dependencies": {
"dockerode": "^4.0.2",
"dotenv": "^16.4.5",
"express": "^4.21.1",
"http": "^0.0.1-security",
"socket.io": "^4.8.1"
}
}

View file

@ -0,0 +1,2 @@
ROOM_ID=123456
PORT=4500

View file

@ -0,0 +1,242 @@
import { Server, Socket } from "socket.io";
import Docker from 'dockerode';
import fs from 'fs';
const MAX_USERS_PER_ROOM = 60;
const MAX_TOTAL_CONNECTIONS = 2000;
export const setupWebsocket = (io: Server): void => {
let totalConnections = 0;
io.on("connection", (socket: Socket) => {
if (totalConnections >= MAX_TOTAL_CONNECTIONS) {
console.log("Connection limit reached. Disconnecting client.");
socket.emit("join-failure", "Le nombre maximum de connexions a été atteint");
socket.disconnect(true);
return;
}
totalConnections++;
console.log("A user connected:", socket.id, "| Total connections:", totalConnections);
socket.on("create-room", (sentRoomName) => {
// Ensure sentRoomName is a string before applying toUpperCase()
const roomName = (typeof sentRoomName === "string" && sentRoomName.trim() !== "")
? sentRoomName.toUpperCase()
: generateRoomName();
console.log(`Created room with name: ${roomName}`);
if (!io.sockets.adapter.rooms.get(roomName)) {
socket.join(roomName);
socket.emit("create-success", roomName);
} else {
socket.emit("create-failure");
}
});
socket.on("join-room", ({ enteredRoomName, username }: { enteredRoomName: string; username: string }) => {
if (io.sockets.adapter.rooms.has(enteredRoomName)) {
const clientsInRoom = io.sockets.adapter.rooms.get(enteredRoomName)?.size || 0;
if (clientsInRoom <= MAX_USERS_PER_ROOM) {
socket.join(enteredRoomName);
socket.to(enteredRoomName).emit("user-joined", { id: socket.id, name: username, answers: [] });
socket.emit("join-success");
} else {
socket.emit("join-failure", "La salle est remplie");
}
} else {
socket.emit("join-failure", "Le nom de la salle n'existe pas");
}
});
socket.on("next-question", ({ roomName, question }: { roomName: string; question: string }) => {
socket.to(roomName).emit("next-question", question);
});
socket.on("launch-student-mode", ({ roomName, questions }: { roomName: string; questions: string[] }) => {
socket.to(roomName).emit("launch-student-mode", questions);
});
socket.on("end-quiz", ({ roomName }: { roomName: string }) => {
socket.to(roomName).emit("end-quiz");
});
socket.on("message", (data: string) => {
console.log("Received message from", socket.id, ":", data);
});
socket.on("disconnect", () => {
totalConnections--;
console.log("A user disconnected:", socket.id, "| Total connections:", totalConnections);
for (const [room] of io.sockets.adapter.rooms) {
if (room !== socket.id) {
io.to(room).emit("user-disconnected", socket.id);
}
}
});
socket.on("submit-answer", ({
roomName,
username,
answer,
idQuestion,
}: {
roomName: string;
username: string;
answer: string;
idQuestion: string;
}) => {
socket.to(roomName).emit("submit-answer-room", {
idUser: socket.id,
username,
answer,
idQuestion,
});
});
socket.on("error", (error) => {
console.error("WebSocket server error:", error);
});
// Stress Testing
socket.on("message-from-teacher", ({ roomName, message }: { roomName: string; message: string }) => {
console.log(`Message reçu dans la salle ${roomName} : ${message}`);
socket.to(roomName).emit("message-sent-teacher", { message });
});
socket.on("message-from-student", ({ roomName, message }: { roomName: string; message: string }) => {
console.log(`Message reçu dans la salle ${roomName} : ${message}`);
socket.to(roomName).emit("message-sent-student", { message });
});
interface ContainerStats {
containerId: string;
containerName: string;
memoryUsedMB: number | null;
memoryUsedPercentage: number | null;
cpuUsedPercentage: number | null;
error?: string;
}
class ContainerMetrics {
private docker: Docker;
private containerName: string;
private bytesToMB(bytes: number): number {
return Math.round(bytes / (1024 * 1024));
}
constructor() {
this.docker = new Docker({
socketPath: process.platform === 'win32' ? '//./pipe/docker_engine' : '/var/run/docker.sock'
});
this.containerName = `room_${process.env.ROOM_ID}`;
}
private async getContainerNetworks(containerId: string): Promise<string[]> {
const container = this.docker.getContainer(containerId);
const info = await container.inspect();
return Object.keys(info.NetworkSettings.Networks);
}
public async getAllContainerStats(): Promise<ContainerStats[]> {
try {
// First get our container to find its networks
const ourContainer = await this.docker.listContainers({
all: true,
filters: { name: [this.containerName] }
});
if (!ourContainer.length) {
throw new Error(`Container ${this.containerName} not found`);
}
const ourNetworks = await this.getContainerNetworks(ourContainer[0].Id);
// Get all containers
const allContainers = await this.docker.listContainers();
// Get stats for containers on the same networks
const containerStats = await Promise.all(
allContainers.map(async (container): Promise<ContainerStats | null> => {
try {
const containerNetworks = await this.getContainerNetworks(container.Id);
// Check if container shares any network with our container
if (!containerNetworks.some(network => ourNetworks.includes(network))) {
return null;
}
const stats = await this.docker.getContainer(container.Id).stats({ stream: false });
const memoryStats = {
usage: stats.memory_stats.usage,
limit: stats.memory_stats.limit || 0,
percent: stats.memory_stats.limit ? (stats.memory_stats.usage / stats.memory_stats.limit) * 100 : 0
};
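// CPU usage follows the same delta formula as `docker stats`: (container CPU delta / system CPU delta) * online CPUs * 100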
const cpuDelta = stats.cpu_stats?.cpu_usage?.total_usage - (stats.precpu_stats?.cpu_usage?.total_usage || 0);
const systemDelta = stats.cpu_stats?.system_cpu_usage - (stats.precpu_stats?.system_cpu_usage || 0);
const cpuPercent = systemDelta > 0 ? (cpuDelta / systemDelta) * (stats.cpu_stats?.online_cpus || 1) * 100 : 0;
return {
containerId: container.Id,
containerName: container.Names[0].replace(/^\//, ''),
memoryUsedMB: this.bytesToMB(memoryStats.usage),
memoryUsedPercentage: memoryStats.percent,
cpuUsedPercentage: cpuPercent
};
} catch (error) {
return {
containerId: container.Id,
containerName: container.Names[0].replace(/^\//, ''),
memoryUsedMB: null,
memoryUsedPercentage: null,
cpuUsedPercentage: null,
error: error instanceof Error ? error.message : String(error)
};
}
})
);
// Change the filter to use proper type predicate
return containerStats.filter((stats): stats is ContainerStats => stats !== null);
} catch (error) {
console.error('Stats error:', error);
return [{
containerId: 'unknown',
containerName: 'unknown',
memoryUsedMB: null,
memoryUsedPercentage: null,
cpuUsedPercentage: null,
error: error instanceof Error ? error.message : String(error)
}];
}
}
}
const containerMetrics = new ContainerMetrics();
socket.on("get-usage", async () => {
try {
const usageData = await containerMetrics.getAllContainerStats();
socket.emit("usage-data", usageData);
} catch (error) {
socket.emit("error", { message: "Failed to retrieve usage data" });
}
});
});
const generateRoomName = (length = 6): string => {
const characters = "0123456789";
let result = "";
for (let i = 0; i < length; i++) {
result += characters.charAt(Math.floor(Math.random() * characters.length));
}
return result;
};
};

14
quizRoom/tsconfig.json Normal file
View file

@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "ES6",
"module": "commonjs",
"outDir": "./dist",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true
},
"include": ["./**/*"],
"exclude": ["node_modules"]
}

View file

@ -8,6 +8,10 @@ RUN npm install
COPY ./ .
EXPOSE 4400
ENV PORT=3000
EXPOSE ${PORT}
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:${PORT}/health || exit 1
CMD ["npm", "run", "start"]

View file

@ -3,10 +3,6 @@ const express = require("express");
const http = require("http");
const dotenv = require('dotenv');
// Import Sockets
const { setupWebsocket } = require("./socket/socket");
const { Server } = require("socket.io");
// instantiate the db
const db = require('./config/db.js');
// instantiate the models
@ -18,6 +14,13 @@ const users = require('./models/users.js');
const userModel = new users(db, foldersModel);
const images = require('./models/images.js');
const imageModel = new images(db);
const {RoomRepository} = require('./models/room.js');
const roomRepModel = new RoomRepository(db);
// Instantiate the controllers
const QuizProviderOptions = {
provider: 'docker'
};
// instantiate the controllers
const usersController = require('./controllers/users.js');
@ -28,18 +31,23 @@ const quizController = require('./controllers/quiz.js');
const quizControllerInstance = new quizController(quizModel, foldersModel);
const imagesController = require('./controllers/images.js');
const imagesControllerInstance = new imagesController(imageModel);
const roomsController = require('./controllers/rooms.js');
const roomsControllerInstance = new roomsController(QuizProviderOptions,roomRepModel);
// export the controllers
module.exports.users = usersControllerInstance;
module.exports.folders = foldersControllerInstance;
module.exports.quizzes = quizControllerInstance;
module.exports.images = imagesControllerInstance;
module.exports.rooms = roomsControllerInstance;
//import routers (instantiate controllers as side effect)
const userRouter = require('./routers/users.js');
const folderRouter = require('./routers/folders.js');
const quizRouter = require('./routers/quiz.js');
const imagesRouter = require('./routers/images.js');
const roomRouter = require('./routers/rooms.js');
const healthRouter = require('./routers/health.js');
// Setup environment
dotenv.config();
@ -50,27 +58,10 @@ const app = express();
const cors = require("cors");
const bodyParser = require('body-parser');
const configureServer = (httpServer, isDev) => {
return new Server(httpServer, {
path: "/socket.io",
cors: {
origin: "*",
methods: ["GET", "POST"],
credentials: true,
},
secure: !isDev, // true for https, false for http
});
};
// Start sockets (depending on the dev or prod environment)
let server = http.createServer(app);
let isDev = process.env.NODE_ENV === 'development';
console.log(`Environnement: ${process.env.NODE_ENV} (${isDev ? 'dev' : 'prod'})`);
const io = configureServer(server);
setupWebsocket(io);
app.use(cors());
app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json());
@ -80,6 +71,8 @@ app.use('/api/user', userRouter);
app.use('/api/folder', folderRouter);
app.use('/api/quiz', quizRouter);
app.use('/api/image', imagesRouter);
app.use('/api/room', roomRouter);
app.use('/health', healthRouter);
app.use(errorHandler);

View file

@ -0,0 +1,94 @@
const {Room} = require('../models/room.js');
const BaseRoomProvider = require('../roomsProviders/base-provider.js');
//const ClusterRoomProvider = require('../roomsProviders/cluster-provider.js');
const DockerRoomProvider = require('../roomsProviders/docker-provider.js');
//const KubernetesRoomProvider = require('../roomsProviders/kubernetes-provider');
const NB_CODE_CHARS = 6;
const NB_MS_UPDATE_ROOM = 1000;
const NB_MS_CLEANUP = 30000;
class RoomsController {
constructor(options = {}, roomRepository) {
this.provider = this.createProvider(
options.provider || process.env.ROOM_PROVIDER || 'cluster',
options.providerOptions,
roomRepository
);
this.roomRepository = roomRepository;
this.setupTasks();
}
createProvider(type, options, repository) {
switch (type) {
/*
case 'cluster':
return new ClusterRoomProvider(options, this.roomRepository);
*/
// Uncomment these as needed
case 'docker':
return new DockerRoomProvider(options, repository);
/*
case 'kubernetes':
return new KubernetesRoomProvider(options);
*/
default:
throw new Error(`Type d'approvisionnement inconnu: ${type}`);
}
}
async setupTasks(){
await this.provider.syncInstantiatedRooms();
// Update rooms
setInterval(() => {
this.provider.updateRoomsInfo().catch(console.error);
}, NB_MS_UPDATE_ROOM);
// Cleanup rooms
setInterval(() => {
this.provider.cleanup().catch(console.error);
}, NB_MS_CLEANUP);
}
async createRoom(options = {}) {
let roomIdValid = false
let roomId;
while(!roomIdValid){
roomId = options.roomId || this.generateRoomId();
roomIdValid = !(await this.provider.getRoomInfo(roomId));
}
return await this.provider.createRoom(roomId,options);
}
async updateRoom(roomId, info) {
return await this.provider.updateRoomInfo(roomId, {});
}
async deleteRoom(roomId) {
return await this.provider.deleteRoom(roomId);
}
async getRoomStatus(roomId) {
return await this.provider.getRoomStatus(roomId);
}
async listRooms() {
return await this.provider.listRooms();
}
generateRoomId() {
const characters = "0123456789";
let result = "";
for (let i = 0; i < NB_CODE_CHARS; i++) {
result += characters.charAt(
Math.floor(Math.random() * characters.length)
);
}
return result;
}
}
module.exports = RoomsController;

View file

@ -22,7 +22,7 @@ class Token {
if (error) {
throw new AppError(UNAUTHORIZED_INVALID_TOKEN)
}
req.user = payload;
});

View file

@ -51,6 +51,7 @@ class Quiz {
await this.db.connect()
const conn = this.db.getConnection();
const quizCollection = conn.collection('files');
const quiz = await quizCollection.findOne({ _id: ObjectId.createFromHexString(quizId) });

87
server/models/room.js Normal file
View file

@ -0,0 +1,87 @@
class Room {
constructor(id, name, host, nbStudents = 0,) { // Default nbStudents to 0
this.id = id;
this.name = name;
if (!host.startsWith('http://') && !host.startsWith('https://')) {
host = 'http://' + host;
}
this.host = host;
this.nbStudents = nbStudents;
this.mustBeCleaned = false;
}
}
class RoomRepository {
constructor(db) {
this.db = db;
this.connection = null;
this.collection = null;
}
async init() {
if (!this.connection) {
await this.db.connect();
this.connection = this.db.getConnection();
}
if (!this.collection) this.collection = this.connection.collection('rooms');
}
async create(room) {
await this.init();
const existingRoom = await this.collection.findOne({ id: room.id });
if (existingRoom) {
throw new Error(`Erreur : la salle ${room.id} existe déjà`);
}
const returnedId = await this.collection.insertOne(room);
return await this.collection.findOne({ _id: returnedId.insertedId });
}
async get(id) {
await this.init();
const existingRoom = await this.collection.findOne({ id: id });
if (!existingRoom) {
console.warn(`La salle avec l'identifiant ${id} n'a pas été trouvée.`);
return null;
}
return existingRoom;
}
async getAll() {
await this.init();
return await this.collection.find().toArray();
}
async update(room,roomId = null) {
await this.init();
const searchId = roomId ?? room.id;
const result = await this.collection.updateOne(
{ id: searchId },
{ $set: room },
{ upsert: false }
);
if (result.modifiedCount === 0) {
if (result.matchedCount > 0) {
return true; // Document exists but no changes needed
}
return false;
}
return true;
}
async delete(id) {
await this.init();
const result = await this.collection.deleteOne({ id: id });
if (result.deletedCount === 0) {
console.warn(`La salle ${id} n'a pas été trouvée pour effectuer sa suppression.`);
return false;
}
return true;
}
}
module.exports = { Room, RoomRepository };

2596
server/package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -13,18 +13,22 @@
"author": "",
"license": "MIT",
"dependencies": {
"@types/express": "^5.0.0",
"bcrypt": "^5.1.1",
"cors": "^2.8.5",
"dockerode": "^4.0.2",
"dotenv": "^16.4.4",
"express": "^4.18.2",
"jsonwebtoken": "^9.0.2",
"mongodb": "^6.3.0",
"multer": "^1.4.5-lts.1",
"net": "^1.0.2",
"nodemailer": "^6.9.9",
"socket.io": "^4.7.2",
"socket.io-client": "^4.7.2"
},
"devDependencies": {
"@types/node": "^22.8.4",
"cross-env": "^7.0.3",
"jest": "^29.7.0",
"jest-mock": "^29.7.0",

View file

@ -0,0 +1,75 @@
/**
* @template T
* @typedef {import('../../types/room').RoomInfo} RoomInfo
* @typedef {import('../../types/room').RoomOptions} RoomOptions
* @typedef {import('../../types/room').BaseProviderConfig} BaseProviderConfig
*/
const MIN_NB_SECONDS_BEFORE_CLEANUP = process.env.MIN_NB_SECONDS_BEFORE_CLEANUP || 60
class BaseRoomProvider {
constructor(config = {}, roomRepository) {
this.config = config;
this.roomRepository = roomRepository;
this.quiz_docker_image = process.env.QUIZROOM_IMAGE || "evaluetonsavoir-quizroom";
this.quiz_docker_port = process.env.QUIZROOM_PORT || 4500;
this.quiz_expose_port = process.env.QUIZROOM_EXPOSE_PORT || false;
}
async createRoom(roomId, options) {
throw new Error("Fonction non-implantée - classe abstraite");
}
async deleteRoom(roomId) {
throw new Error("Fonction non-implantée - classe abstraite");
}
async getRoomStatus(roomId) {
throw new Error("Fonction non-implantée - classe abstraite");
}
async listRooms() {
throw new Error("Fonction non-implantée - classe abstraite");
}
async cleanup() {
throw new Error("Fonction non-implantée - classe abstraite");
}
async syncInstantiatedRooms(){
throw new Error("Fonction non-implantée - classe abstraite");
}
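// Polls each room's /health endpoint; a room is flagged for cleanup when it is unreachable or has been empty for longer than MIN_NB_SECONDS_BEFORE_CLEANUP seconds.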
async updateRoomsInfo() {
const rooms = await this.roomRepository.getAll();
for(var room of rooms){
const url = `${room.host}/health`;
try {
const response = await fetch(url);
if (!response.ok) {
room.mustBeCleaned = true;
await this.roomRepository.update(room);
continue;
}
const json = await response.json();
room.nbStudents = json.connections;
room.mustBeCleaned = room.nbStudents === 0 && json.uptime > MIN_NB_SECONDS_BEFORE_CLEANUP;
await this.roomRepository.update(room);
} catch (error) {
room.mustBeCleaned = true;
await this.roomRepository.update(room);
}
}
}
async getRoomInfo(roomId) {
const info = await this.roomRepository.get(roomId);
return info;
}
}
module.exports = BaseRoomProvider;

View file

@ -0,0 +1,110 @@
const cluster = require("node:cluster");
const { cpus } = require("node:os");
const BaseRoomProvider = require("./base-provider.js");
class ClusterRoomProvider extends BaseRoomProvider {
constructor(config = {}, roomRepository) {
super(config, roomRepository);
this.workers = new Map();
if (cluster.isPrimary) {
this.initializeCluster();
}
}
initializeCluster() {
const numCPUs = cpus().length;
for (let i = 0; i < numCPUs; i++) {
const worker = cluster.fork();
this.handleWorkerMessages(worker);
}
cluster.on("exit", (worker, code, signal) => {
console.log(`Worker ${worker.process.pid} died. Starting new worker...`);
const newWorker = cluster.fork();
this.handleWorkerMessages(newWorker);
});
}
handleWorkerMessages(worker) {
worker.on("message", async (msg) => {
if (msg.type === "room_status") {
await this.updateRoomInfo(msg.roomId, {
status: msg.status,
workerId: worker.id,
lastUpdate: Date.now(),
});
}
});
}
async createRoom(roomId, options = {}) {
const workerLoads = Array.from(this.workers.entries())
.map(([id, data]) => ({
id,
rooms: data.rooms.size,
}))
.sort((a, b) => a.rooms - b.rooms);
const workerId = workerLoads[0].id;
const worker = cluster.workers[workerId];
if (!worker) {
throw new Error("No available workers");
}
worker.send({ type: "create_room", roomId, options });
const roomInfo = {
roomId,
provider: "cluster",
status: "creating",
workerId,
pid: worker.process.pid,
createdAt: Date.now(),
};
await this.updateRoomInfo(roomId, roomInfo);
return roomInfo;
}
async deleteRoom(roomId) {
const roomInfo = await this.getRoomInfo(roomId);
if (roomInfo?.workerId && cluster.workers[roomInfo.workerId]) {
cluster.workers[roomInfo.workerId].send({
type: "delete_room",
roomId,
});
}
//await this.valkey.del(["room", roomId]);
}
async getRoomStatus(roomId) {
return await this.getRoomInfo(roomId);
}
async listRooms() {
let rooms = [];
/*
const keys = await this.valkey.hkeys("room:*");
const rooms = await Promise.all(
keys.map((key) => this.getRoomInfo(key.split(":")[1]))
);
*/
return rooms.filter((room) => room !== null);
}
async cleanup() {
const rooms = await this.listRooms();
const staleTimeout = 30000;
for (const room of rooms) {
if (Date.now() - (room.lastUpdate || room.createdAt) > staleTimeout) {
await this.deleteRoom(room.roomId);
}
}
}
}
module.exports = ClusterRoomProvider;

View file

@ -0,0 +1,275 @@
const Docker = require("dockerode");
const { Room } = require("../models/room.js");
const BaseRoomProvider = require("./base-provider.js");
class DockerRoomProvider extends BaseRoomProvider {
constructor(config, roomRepository) {
super(config, roomRepository);
const dockerSocket = process.env.DOCKER_SOCKET || "/var/run/docker.sock";
this.docker = new Docker({ socketPath: dockerSocket });
this.docker_network = process.env.QUIZ_NETWORK_NAME || 'evaluetonsavoir_quiz_network';
}
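// Reconciles already-running quizroom containers with the rooms collection: containers that do not follow the room_<id> naming convention are removed, and orphan containers are re-registered in the database.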
async syncInstantiatedRooms() {
let containers = await this.docker.listContainers();
containers = containers.filter(container => container.Image === this.quiz_docker_image);
const containerIds = new Set(containers.map(container => container.Id));
for (let container of containers) {
const container_name = container.Names[0].slice(1);
if (!container_name.startsWith("room_")) {
console.warn(`Le conteneur ${container_name} ne suit pas la convention de nommage, il sera supprimé.`);
const curContainer = this.docker.getContainer(container.Id);
await curContainer.stop();
await curContainer.remove();
containerIds.delete(container.Id);
console.warn(`Le conteneur ${container_name} a été supprimé.`);
}
else {
console.warn(`Conteneur orphelin trouvé : ${container_name}`);
const roomId = container_name.slice(5);
const room = await this.roomRepository.get(roomId);
if (!room) {
console.warn(`Le conteneur n'est pas dans notre base de données.`);
const containerInfo = await this.docker.getContainer(container.Id).inspect();
const containerIP = containerInfo.NetworkSettings.Networks[this.docker_network].IPAddress;
const host = `${containerIP}:${this.quiz_docker_port}`;
console.warn(`Création de la salle ${roomId} dans notre base de données - hôte : ${host}`);
await this.roomRepository.create(new Room(roomId, container_name, host));
continue;
}
console.warn(`La salle ${roomId} est déjà dans notre base de données.`);
}
}
}
async checkAndPullImage(imageName) {
try {
const images = await this.docker.listImages({ all: true });
//console.log('Images disponibles:', images.map(img => ({
// RepoTags: img.RepoTags || [],
// Id: img.Id
//})));
const imageExists = images.some(img => {
const tags = img.RepoTags || [];
return tags.includes(imageName) ||
tags.includes(`${imageName}:latest`) ||
img.Id.includes(imageName);
});
if (!imageExists) {
console.log(`L'image ${imageName} n'a pas été trouvée localement, tentative de téléchargement...`);
try {
await this.docker.pull(imageName);
console.log(`L'image ${imageName} a été téléchargée avec succès`);
} catch (pullError) {
const localImages = await this.docker.listImages({ all: true });
const foundLocally = localImages.some(img =>
(img.RepoTags || []).includes(imageName) ||
(img.RepoTags || []).includes(`${imageName}:latest`)
);
if (!foundLocally) {
throw new Error(`Impossible de trouver ou de télécharger l'image ${imageName}: ${pullError.message}`);
} else {
console.log(`L'image ${imageName} a été trouvée localement après vérification supplémentaire`);
}
}
} else {
console.log(`L'image ${imageName} a été trouvée localement`);
}
} catch (error) {
throw new Error(`Une erreur est survenue lors de la vérification/téléchargement de l'image ${imageName}: ${error.message}`);
}
}
async createRoom(roomId, options) {
const container_name = `room_${roomId}`;
try {
await this.checkAndPullImage(this.quiz_docker_image);
const containerConfig = {
Image: this.quiz_docker_image,
name: container_name,
HostConfig: {
NetworkMode: this.docker_network,
RestartPolicy: {
Name: 'unless-stopped'
},
Binds: [
'/var/run/docker.sock:/var/run/docker.sock'
]
},
Env: [
`ROOM_ID=${roomId}`,
`PORT=${this.quiz_docker_port}`,
...(options.env || [])
]
};
if (this.quiz_expose_port) {
containerConfig.ExposedPorts = {
[`${this.quiz_docker_port}/tcp`]: {}
};
containerConfig.HostConfig.PortBindings = {
[`${this.quiz_docker_port}/tcp`]: [{ HostPort: '' }] // Empty string for random port
};
}
const container = await this.docker.createContainer(containerConfig);
await container.start();
const containerInfo = await container.inspect();
const networkInfo = containerInfo.NetworkSettings.Networks[this.docker_network];
if (!networkInfo) {
throw new Error(`Le conteneur n'a pas pu se connecter au réseau: ${this.docker_network}`);
}
const containerIP = networkInfo.IPAddress;
const host = `http://${containerIP}:${this.quiz_docker_port}`;
let health = false;
let attempts = 0;
const maxAttempts = 15;
while (!health && attempts < maxAttempts) {
try {
const response = await fetch(`${host}/health`, {
signal: AbortSignal.timeout(1000)
});
if (response.ok) {
health = true;
console.log(`Le conteneur ${container_name} est devenu actif après ${attempts + 1} tentative(s)`);
} else {
throw new Error(`Health check failed with status ${response.status}`);
}
} catch (error) {
attempts++;
console.log(`Attente du conteneur: ${container_name} (tentative ${attempts}/${maxAttempts})`);
await new Promise(resolve => setTimeout(resolve, 1000));
}
}
if (!health) {
console.error(`Container ${container_name} failed health check after ${maxAttempts} attempts`);
await container.stop();
await container.remove();
throw new Error(`Room ${roomId} did not respond within acceptable timeout`);
}
return await this.roomRepository.create(new Room(roomId, container_name, host, 0));
} catch (error) {
console.error(`Échec de la création de la salle ${roomId}:`, error);
throw error;
}
}
async deleteRoom(roomId) {
const container_name = `room_${roomId}`;
await this.roomRepository.delete(roomId);
try {
const container = this.docker.getContainer(container_name);
const containerInfo = await container.inspect();
if (containerInfo) {
await container.stop();
await container.remove();
console.log(`Le conteneur pour la salle ${roomId} a été arrêté et supprimé.`);
}
} catch (error) {
if (error.statusCode === 404) {
console.warn(`Le conteneur pour la salle ${roomId} n'a pas été trouvé, la salle sera supprimée de la base de données.`);
} else {
console.error(`Erreur pour la salle ${roomId}:`, error);
throw new Error("La salle :${roomId} n'as pas pu être supprimée.");
}
}
console.log(`La salle ${roomId} a été supprimée.`);
}
async getRoomStatus(roomId) {
const room = await this.roomRepository.get(roomId);
if (!room) return null;
try {
const container = this.docker.getContainer(room.containerId || `room_${roomId}`);
const info = await container.inspect();
const updatedRoomInfo = {
...room,
status: info.State.Running ? "running" : "terminated",
containerStatus: {
Running: info.State.Running,
StartedAt: info.State.StartedAt,
FinishedAt: info.State.FinishedAt,
},
lastUpdate: Date.now(),
};
await this.roomRepository.update(updatedRoomInfo);
return updatedRoomInfo;
} catch (error) {
if (error.statusCode === 404) {
console.warn(`Le conteneur pour la salle ${roomId} n'a pas été trouvé, il sera mis en état "terminé".`);
const terminatedRoomInfo = {
...room,
status: "terminated",
containerStatus: {
Running: false,
StartedAt: room.containerStatus?.StartedAt || null,
FinishedAt: Date.now(),
},
lastUpdate: Date.now(),
};
await this.roomRepository.update(terminatedRoomInfo);
return terminatedRoomInfo;
} else {
console.error(`Une erreur s'est produite lors de l'obtention de l'état de la salle ${roomId}:`, error);
return null;
}
}
}
async listRooms() {
const rooms = await this.roomRepository.getAll();
return rooms;
}
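// Deletes rooms flagged mustBeCleaned, then removes any leftover quizroom containers that no longer match a room in the database.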
async cleanup() {
const rooms = await this.roomRepository.getAll();
for (let room of rooms) {
if (room.mustBeCleaned) {
try {
await this.deleteRoom(room.id);
} catch (error) {
console.error(`Erreur lors du nettoyage de la salle ${room.id}:`, error);
}
}
}
let containers = await this.docker.listContainers();
containers = containers.filter(container => container.Image === this.quiz_docker_image);
const roomIds = rooms.map(room => room.id);
for (let container of containers) {
if (!roomIds.includes(container.Names[0].slice(6))) {
const curContainer = this.docker.getContainer(container.Id);
await curContainer.stop();
await curContainer.remove();
console.warn(`Conteneur orphelin ${container.Names[0]} supprimé.`);
}
}
}
}
module.exports = DockerRoomProvider;

20
server/routers/health.js Normal file
View file

@ -0,0 +1,20 @@
const express = require('express');
const router = express.Router();
router.get('/', async (req, res) => {
try {
const dbStatus = await require('../config/db.js').getConnection() ? 'connected' : 'disconnected';
res.json({
status: 'healthy',
timestamp: new Date(),
db: dbStatus
});
} catch (error) {
res.status(500).json({
status: 'unhealthy',
error: error.message
});
}
});
module.exports = router;

54
server/routers/rooms.js Normal file
View file

@ -0,0 +1,54 @@
const { Router } = require("express");
const roomsController = require('../app.js').rooms;
const jwt = require('../middleware/jwtToken.js');
const router = Router();
router.get("/",jwt.authenticate, async (req, res)=> {
try {
const data = await roomsController.listRooms();
res.json(data);
} catch (error) {
res.status(500).json({ error: "Échec de listage des salle" });
}
});
router.post("/",jwt.authenticate, async (req, res) => {
try {
const data = await roomsController.createRoom();
res.json(data);
} catch (error) {
console.log(error);
res.status(500).json({ error: "Échec de la création de salle :" + error });
}
});
router.put("/:id",jwt.authenticate, async (req, res) => {
try {
const data = await roomsController.updateRoom(req.params.id);
res.json(data);
} catch (error) {
res.status(500).json({ error: "Échec de la mise a jour de salle : "+error });
}
});
router.delete("/:id",jwt.authenticate, async (req, res) => {
try {
const data = await roomsController.deleteRoom(req.params.id);
res.json(data);
} catch (error) {
res.status(500).json({ error: `Échec de suppression de la salle: `+error });
}
});
router.get("/:id", async (req, res) => {
try {
const data = await roomsController.getRoomStatus(req.params.id);
res.json(data);
} catch (error) {
res.status(500).json({ error: "Impossible d'afficher les informations de la salle: " + error });
}
});
module.exports = router;

View file

@ -1,125 +0,0 @@
const MAX_USERS_PER_ROOM = 60;
const MAX_TOTAL_CONNECTIONS = 2000;
const setupWebsocket = (io) => {
let totalConnections = 0;
io.on("connection", (socket) => {
if (totalConnections >= MAX_TOTAL_CONNECTIONS) {
console.log("Connection limit reached. Disconnecting client.");
socket.emit(
"join-failure",
"Le nombre maximum de connexions a été atteint"
);
socket.disconnect(true);
return;
}
totalConnections++;
// console.log(
// "A user connected:",
// socket.id,
// "| Total connections:",
// totalConnections
// );
socket.on("create-room", (sentRoomName) => {
if (sentRoomName) {
const roomName = sentRoomName.toUpperCase();
if (!io.sockets.adapter.rooms.get(roomName)) {
socket.join(roomName);
socket.emit("create-success", roomName);
} else {
socket.emit("create-failure");
}
} else {
const roomName = generateRoomName();
if (!io.sockets.adapter.rooms.get(roomName)) {
socket.join(roomName);
socket.emit("create-success", roomName);
} else {
socket.emit("create-failure");
}
}
});
socket.on("join-room", ({ enteredRoomName, username }) => {
if (io.sockets.adapter.rooms.has(enteredRoomName)) {
const clientsInRoom =
io.sockets.adapter.rooms.get(enteredRoomName).size;
if (clientsInRoom <= MAX_USERS_PER_ROOM) {
const newStudent = {
id: socket.id,
name: username,
answers: [],
};
socket.join(enteredRoomName);
socket
.to(enteredRoomName)
.emit("user-joined", newStudent);
socket.emit("join-success");
} else {
socket.emit("join-failure", "La salle est remplie");
}
} else {
socket.emit("join-failure", "Le nom de la salle n'existe pas");
}
});
socket.on("next-question", ({ roomName, question }) => {
// console.log("next-question", roomName, question);
socket.to(roomName).emit("next-question", question);
});
socket.on("launch-student-mode", ({ roomName, questions }) => {
socket.to(roomName).emit("launch-student-mode", questions);
});
socket.on("end-quiz", ({ roomName }) => {
socket.to(roomName).emit("end-quiz");
});
socket.on("message", (data) => {
console.log("Received message from", socket.id, ":", data);
});
socket.on("disconnect", () => {
totalConnections--;
console.log(
"A user disconnected:",
socket.id,
"| Total connections:",
totalConnections
);
for (const [room] of io.sockets.adapter.rooms) {
if (room !== socket.id) {
io.to(room).emit("user-disconnected", socket.id);
}
}
});
socket.on("submit-answer", ({ roomName, username, answer, idQuestion }) => {
socket.to(roomName).emit("submit-answer-room", {
idUser: socket.id,
username,
answer,
idQuestion,
});
});
});
const generateRoomName = (length = 6) => {
const characters = "0123456789";
let result = "";
for (let i = 0; i < length; i++) {
result += characters.charAt(
Math.floor(Math.random() * characters.length)
);
}
return result;
};
};
module.exports = { setupWebsocket };

View file

@ -0,0 +1,2 @@
node_modules
.env

View file

@ -0,0 +1,16 @@
# Target url
BASE_URL=http://host.docker.internal
# Connection account
USER_EMAIL=admin@admin.com
USER_PASSWORD=admin
# Stress test parameters
NUMBER_ROOMS=5
USERS_PER_ROOM=60
# Optional
MAX_MESSAGES_ROUND=20
CONVERSATION_INTERVAL=1000
MESSAGE_RESPONSE_TIMEOUT=5000
BATCH_DELAY=1000
BATCH_SIZE=10

View file

@ -0,0 +1,13 @@
FROM node:18
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
VOLUME /app/output
CMD ["node", "main.js"]

51
test/stressTest/README.md Normal file
View file

@ -0,0 +1,51 @@
# Load Test - EvalueTonSavoir
This container runs load tests against the EvalueTonSavoir application.
## Prerequisites
- Docker
- Docker Compose
## Configuration
1. Create a `.env` file from the `.env.example` template:
```bash
cp .env.example .env
```
2. Adjust the variables in the `.env` file:
```bash
# Target application URL
BASE_URL=http://votre-url.com
# Login account
USER_EMAIL=admin@admin.com
USER_PASSWORD=admin
# Load test parameters
NUMBER_ROOMS=5 # Number of rooms to create
USERS_PER_ROOM=60 # Number of users per room
```
#### Optional parameters
In the `.env` file, you can also configure:
```bash
MAX_MESSAGES_ROUND=20 # Maximum messages per round
CONVERSATION_INTERVAL=1000 # Interval between messages (ms)
MESSAGE_RESPONSE_TIMEOUT=5000 # Response timeout (ms)
BATCH_DELAY=1000 # Delay between batches (ms)
BATCH_SIZE=10 # Size of each user batch
```
## Running
To start the load test:
```bash
docker compose up
```
The results will be available in the `output/` folder.
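If the test code changes, rebuild the image when starting the run (standard Docker Compose flag):
```bash
docker compose up --build
```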

View file

@ -0,0 +1,46 @@
export class TestMetrics {
constructor() {
this.reset();
}
reset() {
this.roomsCreated = 0;
this.roomsFailed = 0;
this.usersConnected = 0;
this.userConnectionsFailed = 0;
this.messagesAttempted = 0;
this.messagesSent = 0;
this.messagesReceived = 0;
this.errors = new Map();
}
logError(category, error) {
if (!this.errors.has(category)) {
this.errors.set(category, []);
}
this.errors.get(category).push(error);
}
getSummary() {
return {
rooms: {
created: this.roomsCreated,
failed: this.roomsFailed,
total: this.roomsCreated + this.roomsFailed
},
users: {
connected: this.usersConnected,
failed: this.userConnectionsFailed,
total: this.usersConnected + this.userConnectionsFailed
},
messages: {
attempted: this.messagesAttempted,
sent: this.messagesSent,
received: this.messagesReceived
},
errors: Object.fromEntries(
Array.from(this.errors.entries()).map(([k, v]) => [k, v.length])
)
};
}
}

View file

@ -0,0 +1,83 @@
import { io } from "socket.io-client";
export class RoomParticipant {
constructor(username, roomName) {
this.username = username;
this.roomName = roomName;
this.socket = null;
this.maxRetries = 3;
this.retryDelay = 1000;
}
async connectToRoom(baseUrl) {
let retries = 0;
const maxRetries = 2;
const retryDelay = 2000;
const cleanup = () => {
if (this.socket) {
this.socket.removeAllListeners();
this.socket.disconnect();
this.socket = null;
}
};
while (retries < maxRetries) {
try {
const socket = io(baseUrl, {
path: `/api/room/${this.roomName}/socket`,
transports: ['websocket'],
timeout: 8000,
reconnection: false,
forceNew: true
});
const result = await new Promise((resolve, reject) => {
const timeout = setTimeout(() => {
cleanup();
reject(new Error('Connection timeout'));
}, 8000);
socket.on('connect', () => {
clearTimeout(timeout);
this.socket = socket;
this.onConnected(); // notify subclasses (Teacher, Student, Watcher) that the connection is established
resolve(socket);
});
socket.on('connect_error', (error) => {
clearTimeout(timeout);
cleanup();
reject(new Error(`Connection error: ${error.message}`));
});
socket.on('error', (error) => {
clearTimeout(timeout);
cleanup();
reject(new Error(`Socket error: ${error.message}`));
});
});
return result;
} catch (error) {
retries++;
if (retries === maxRetries) {
throw error;
}
await new Promise(resolve => setTimeout(resolve, retryDelay));
}
}
}
onConnected() {
// To be implemented by child classes
}
disconnect() {
if (this.socket) {
this.socket.disconnect();
this.socket = null;
}
}
}

View file

@ -0,0 +1,48 @@
// student.js
import { RoomParticipant } from './roomParticipant.js';
export class Student extends RoomParticipant {
nbrMessageReceived = 0;
constructor(username, roomName) {
super(username, roomName);
}
connectToRoom(baseUrl) {
return super.connectToRoom(baseUrl);
}
onConnected() {
this.joinRoom();
this.listenForTeacherMessage();
}
joinRoom() {
if (this.socket) {
this.socket.emit('join-room', {
enteredRoomName: this.roomName,
username: this.username
});
}
}
listenForTeacherMessage() {
if (this.socket) {
this.socket.on('message-sent-teacher', ({ message }) => {
this.nbrMessageReceived++;
this.respondToTeacher(message);
});
}
}
respondToTeacher(message) {
const reply = `${this.username} replying to: "${message}"`;
if (this.socket) {
this.socket.emit('message-from-student', {
roomName: this.roomName,
message: reply
});
}
}
}

View file

@ -0,0 +1,46 @@
import { RoomParticipant } from './roomParticipant.js';
export class Teacher extends RoomParticipant {
nbrMessageReceived = 0;
constructor(username, roomName) {
super(username, roomName);
this.ready = false;
}
connectToRoom(baseUrl) {
return super.connectToRoom(baseUrl);
}
onConnected() {
this.createRoom();
this.listenForStudentMessage();
}
createRoom() {
if (this.socket) {
this.socket.emit('create-room', this.roomName);
}
}
broadcastMessage(message) {
if (this.socket) {
this.socket.emit('message-from-teacher', {
roomName: this.roomName,
message
});
} else {
console.warn(`Teacher ${this.username} not ready to broadcast yet`);
}
}
listenForStudentMessage() {
if (this.socket) {
this.socket.on('message-sent-student', ({ message }) => {
//console.log(`Teacher ${this.username} received: "${message}"`);
this.nbrMessageReceived++;
});
}
}
}

View file

@ -0,0 +1,72 @@
import { RoomParticipant } from './roomParticipant.js';
export class Watcher extends RoomParticipant {
roomRessourcesData = [];
checkRessourceInterval = null;
constructor(username, roomName) {
super(username, roomName);
}
connectToRoom(baseUrl) {
return super.connectToRoom(baseUrl);
}
onConnected() {
this.startCheckingResources();
}
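// Requests container usage from the quizroom over the socket and appends each sample to a per-container time series.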
checkRessource() {
if (this.socket?.connected) {
try {
this.socket.emit("get-usage");
this.socket.once("usage-data", (data) => {
const timestamp = Date.now();
// Store each container's metrics separately with timestamp
data.forEach(containerStat => {
const existingData = this.roomRessourcesData.find(d => d.containerId === containerStat.containerId);
if (existingData) {
existingData.metrics.push({
timestamp,
...containerStat
});
} else {
this.roomRessourcesData.push({
containerId: containerStat.containerId,
containerName: containerStat.containerName,
metrics: [{
timestamp,
...containerStat
}]
});
}
});
});
} catch (error) {
console.warn(`Error capturing metrics for room ${this.roomName}:`, error.message);
}
}
}
startCheckingResources(intervalMs = 500) {
if (this.checkRessourceInterval) {
console.warn(`Resource checking is already running for room ${this.roomName}.`);
return;
}
this.checkRessourceInterval = setInterval(() => this.checkRessource(), intervalMs);
}
stopCheckingResources() {
if (this.checkRessourceInterval) {
clearInterval(this.checkRessourceInterval);
this.checkRessourceInterval = null;
}
}
disconnect() {
this.stopCheckingResources();
super.disconnect();
}
}

View file

@ -0,0 +1,26 @@
version: '3'
services:
stress-test:
build:
context: .
dockerfile: Dockerfile
container_name: stress-test
#environment:
# - BASE_URL=http://host.docker.internal
# - USER_EMAIL=admin@admin.com
# - USER_PASSWORD=admin
# - NUMBER_ROOMS=5
# - USERS_PER_ROOM=60
# - MAX_MESSAGES_ROUND=20
# - CONVERSATION_INTERVAL=1000
# - MESSAGE_RESPONSE_TIMEOUT=5000
# - BATCH_DELAY=1000
# - BATCH_SIZE=10
env_file:
- .env
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- ./output:/app/output

201
test/stressTest/main.js Normal file
View file

@ -0,0 +1,201 @@
import { attemptLoginOrRegister, createRoomContainer } from './utility/apiServices.js';
import { Student } from './class/student.js';
import { Teacher } from './class/teacher.js';
import { Watcher } from './class/watcher.js';
import { TestMetrics } from './class/metrics.js';
import dotenv from 'dotenv';
import generateMetricsReport from './utility/metrics_generator.js';
dotenv.config();
const config = {
baseUrl: process.env.BASE_URL || 'http://host.docker.internal',
auth: {
username: process.env.USER_EMAIL || 'admin@admin.com',
password: process.env.USER_PASSWORD || 'admin'
},
rooms: {
count: parseInt(process.env.NUMBER_ROOMS || '15'),
usersPerRoom: parseInt(process.env.USERS_PER_ROOM || '60'),
batchSize: parseInt(process.env.BATCH_SIZE || 5),
batchDelay: parseInt(process.env.BATCH_DELAY || 250)
},
simulation: {
maxMessages: parseInt(process.env.MAX_MESSAGES_ROUND || '20'),
messageInterval: parseInt(process.env.CONVERSATION_INTERVAL || '1000'),
responseTimeout: parseInt(process.env.MESSAGE_RESPONSE_TIMEOUT || 5000)
}
};
const rooms = new Map();
const metrics = new TestMetrics();
// Changes to setupRoom function
async function setupRoom(token, index) {
try {
const room = await createRoomContainer(config.baseUrl, token);
if (!room?.id) throw new Error('Room creation failed');
metrics.roomsCreated++;
const teacher = new Teacher(`teacher_${index}`, room.id);
// Only create watcher for first room (index 0)
const watcher = index === 0 ? new Watcher(`watcher_${index}`, room.id) : null;
await Promise.all([
teacher.connectToRoom(config.baseUrl)
.then(() => metrics.usersConnected++)
.catch(err => {
metrics.userConnectionsFailed++;
metrics.logError('teacherConnection', err);
console.warn(`Teacher ${index} connection failed:`, err.message);
}),
// Only connect watcher if it exists
...(watcher ? [
watcher.connectToRoom(config.baseUrl)
.then(() => metrics.usersConnected++)
.catch(err => {
metrics.userConnectionsFailed++;
metrics.logError('watcherConnection', err);
console.warn(`Watcher ${index} connection failed:`, err.message);
})
] : [])
]);
// Adjust number of students based on whether room has a watcher
const studentCount = watcher ?
config.rooms.usersPerRoom - 2 : // Room with watcher: subtract teacher and watcher
config.rooms.usersPerRoom - 1; // Rooms without watcher: subtract only teacher
const students = Array.from({ length: studentCount },
(_, i) => new Student(`student_${index}_${i}`, room.id));
rooms.set(room.id, { teacher, watcher, students });
return room.id;
} catch (err) {
metrics.roomsFailed++;
metrics.logError('roomSetup', err);
console.warn(`Room ${index} setup failed:`, err.message);
return null;
}
}
async function connectParticipants(roomId) {
const { students } = rooms.get(roomId);
const participants = [...students];
for (let i = 0; i < participants.length; i += config.rooms.batchSize) {
const batch = participants.slice(i, i + config.rooms.batchSize);
await Promise.all(batch.map(p =>
Promise.race([
p.connectToRoom(config.baseUrl).then(() => {
metrics.usersConnected++;
}),
new Promise((_, reject) => setTimeout(() => reject(new Error('Timeout')), 10000))
]).catch(err => {
metrics.userConnectionsFailed++;
metrics.logError('studentConnection', err);
console.warn(`Connection failed for ${p.username}:`, err.message);
})
));
await new Promise(resolve => setTimeout(resolve, config.rooms.batchDelay));
}
}
async function simulate() {
const simulations = Array.from(rooms.entries()).map(async ([roomId, { teacher, students }]) => {
const connectedStudents = students.filter(student => student.socket?.connected);
const expectedResponses = connectedStudents.length;
for (let i = 0; i < config.simulation.maxMessages; i++) {
metrics.messagesAttempted++;
const initialMessages = teacher.nbrMessageReceived;
try {
teacher.broadcastMessage(`Message ${i + 1} from ${teacher.username}`);
metrics.messagesSent++;
await Promise.race([
new Promise(resolve => {
const checkResponses = setInterval(() => {
const receivedResponses = teacher.nbrMessageReceived - initialMessages;
if (receivedResponses >= expectedResponses) {
metrics.messagesReceived += receivedResponses;
clearInterval(checkResponses);
resolve();
}
}, 100);
}),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Response timeout')), config.simulation.responseTimeout)
)
]);
} catch (error) {
metrics.logError('messaging', error);
console.error(`Error in room ${roomId} message ${i + 1}:`, error);
}
await new Promise(resolve => setTimeout(resolve, config.simulation.messageInterval));
}
});
await Promise.all(simulations);
console.log('All room simulations completed');
}
async function generateReport() {
const watcherRoom = Array.from(rooms.entries()).find(([_, room]) => room.watcher);
if (!watcherRoom) {
throw new Error('No watcher found in any room');
}
const data = {
[watcherRoom[0]]: watcherRoom[1].watcher.roomRessourcesData
};
return generateMetricsReport(data, metrics);
}
function cleanup() {
for (const { teacher, watcher, students } of rooms.values()) {
[teacher, watcher, ...students].forEach(p => p?.disconnect());
}
}
async function main() {
try {
const token = await attemptLoginOrRegister(config.baseUrl, config.auth.username, config.auth.password);
if (!token) throw new Error('Authentication failed');
console.log('Creating rooms...');
const roomIds = await Promise.all(
Array.from({ length: config.rooms.count }, (_, i) => setupRoom(token, i))
);
console.log('Connecting participants...');
await Promise.all(roomIds.filter(Boolean).map(connectParticipants));
console.log('Retrieving baseline metrics...');
await new Promise(resolve => setTimeout(resolve, 10000));
console.log('Starting simulation across all rooms...');
await simulate();
console.log('Simulation complete. Waiting for system stabilization...');
await new Promise(resolve => setTimeout(resolve, 10000));
console.log('Generating final report...');
const folderName = await generateReport();
console.log(`Metrics report generated in ${folderName.outputDir}`);
console.log('All done!');
} catch (error) {
metrics.logError('main', error);
console.error('Error:', error.message);
} finally {
cleanup();
}
}
['SIGINT', 'exit', 'uncaughtException', 'unhandledRejection'].forEach(event => {
process.on(event, cleanup);
});
main();

1230
test/stressTest/package-lock.json generated Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,22 @@
{
"name": "stresstest",
"version": "1.0.0",
"description": "main.js",
"type": "module",
"main": "main.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"dependencies": {
"axios": "^1.7.7",
"chart.js": "^3.9.1",
"chartjs-node-canvas": "^4.1.6",
"dockerode": "^4.0.2",
"dotenv": "^16.4.5",
"p-limit": "^6.1.0",
"socket.io": "^4.8.1",
"socket.io-client": "^4.8.1"
}
}

View file

@ -0,0 +1,80 @@
import axios from "axios";
// Logs in a user.
async function login(baseUrl, email, password) {
if (!email || !password) throw new Error("Email and password are required.");
try {
const res = await axios.post(`${baseUrl}/api/user/login`, { email, password }, {
headers: { "Content-Type": "application/json" },
});
if (res.status === 200 && res.data.token) {
console.log(`Login successful for ${email}`);
return res.data.token;
}
throw new Error(`Login failed. Status: ${res.status}`);
} catch (error) {
console.error(`Login error for ${email}:`, error.message);
throw error;
}
}
// Registers a new user.
async function register(baseUrl, email, password) {
if (!email || !password) throw new Error("Email and password are required.");
try {
const res = await axios.post(`${baseUrl}/api/user/register`, { email, password }, {
headers: { "Content-Type": "application/json" },
});
if (res.status === 200) {
console.log(`Registration successful for ${email}`);
return res.data.message || "Registration completed successfully.";
}
throw new Error(`Registration failed. Status: ${res.status}`);
} catch (error) {
console.error(`Registration error for ${email}:`, error.message);
throw error;
}
}
// Attempts to log in a user, or registers and logs in if the login fails.
export async function attemptLoginOrRegister(baseUrl, username, password) {
console.log(`Authenticating user with server: ${baseUrl}, username: ${username}`);
try {
return await login(baseUrl, username, password);
} catch (loginError) {
console.error(`Login failed for ${username}:`, loginError.message);
}
console.log(`Login failed for ${username}. Attempting registration...`);
try {
await register(baseUrl, username, password);
return await login(baseUrl, username, password);
} catch (registerError) {
console.error(`Registration and login failed for ${username}:`, registerError.message);
return null;
}
}
// Creates a new room
export async function createRoomContainer(baseUrl, token) {
if (!token) throw new Error("Authorization token is required.");
try {
const res = await axios.post(`${baseUrl}/api/room`, {}, {
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${token}`,
},
});
if (res.status === 200) return res.data;
throw new Error(`Room creation failed. Status: ${res.status}`);
} catch (error) {
console.error("Room creation error:", error.message);
throw error;
}
}

Some files were not shown because too many files have changed in this diff.