
add Mini Primetime

This commit is contained in:
Lukas Winkler 2020-08-13 17:11:18 +02:00
parent b63fc66129
commit cc52019e25
Signed by: lukas
GPG key ID: 54DE4D798D244853
5 changed files with 31 additions and 23 deletions

data.py
View file

@@ -71,3 +71,25 @@ single_speaker = {
}
assert set(single_speaker["Handbooker Helper"].keys()) == set(range(1, 44 + 1))
series_data = [
{
"name": "Campaign 1",
"playlist_id": "PL1tiwbzkOjQz7D0l_eLJGAISVtcL7oRu_",
},
{
"name": "Campaign 2",
"playlist_id": "PL1tiwbzkOjQxD0jjAE7PsWoaCrs0EkBH2"
},
{
"name": "Handbooker Helper",
"playlist_id": "PL1tiwbzkOjQyr6-gqJ8r29j_rJkR49uDN",
"single_speaker": True
},
{
"name": "Mini Primetime",
"playlist_id": "PL1tiwbzkOjQz9kKDaPRPrX2E7RPTaxEZd",
"initial_speaker": "Will"
}
]
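The new entry introduces an initial_speaker key that no other series uses, next to the existing optional single_speaker flag. As an illustration only, not code from this commit, a consumer of series_data could read the optional keys with dict.get(); the loop below is an assumed sketch:

# Illustration only, not part of this commit: reading the optional keys.
from data import series_data

for series in series_data:
    name = series["name"]
    playlist_id = series["playlist_id"]
    single_speaker = series.get("single_speaker", False)  # only "Handbooker Helper" sets this
    initial_speaker = series.get("initial_speaker")       # only "Mini Primetime" sets this ("Will")
    print(name, playlist_id, single_speaker, initial_speaker)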

View file

@@ -8,26 +8,10 @@ from subprocess import run
import youtube_dl
from peewee import DoesNotExist
from data import series_data
from models import Episode, Series, Line, Phrase
from utils import srtdir, pretty_title
series_data = [
{
"name": "Campaign 1",
"playlist_id": "PL1tiwbzkOjQz7D0l_eLJGAISVtcL7oRu_",
},
{
"name": "Campaign 2",
"playlist_id": "PL1tiwbzkOjQxD0jjAE7PsWoaCrs0EkBH2"
},
{
"name": "Handbooker Helper",
"playlist_id": "PL1tiwbzkOjQyr6-gqJ8r29j_rJkR49uDN",
"single_speaker": True
}
]
def main():
os.nice(15)
@@ -61,11 +45,10 @@ def main():
regex = re.compile(r"Ep(?:is|si)ode (\d+)")
for nr, video in enumerate(videos, 1):
# if Episode.select().where((Episode.season == campaign) & (Episode.video_number == nr)).count() == 1:
# print(f"already imported {vttfile}")
# continue
try:
e = Episode.select().where((Episode.series == s) & (Episode.video_number == nr)).get()
if e.downloaded:
continue
except DoesNotExist:
e = Episode()
e.series = s

View file

@@ -47,7 +47,7 @@ for episode in Episode.select().where((Episode.phrases_imported == False) & (Epi
if noun_chunk in lemma_cache:
lemmas = lemma_cache[noun_chunk]
else:
lemmas = "|".join([token.lemma_ for token in nlp(noun_chunk)])
lemmas = "|".join([token.lemma_ for token in nlp(noun_chunk)]).lower()
lemma_cache[noun_chunk] = lemmas
if lemmas not in nouns:
nouns[lemmas] = {
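The added .lower() normalizes the lemma key: spaCy does not guarantee lowercase lemmas (proper nouns in particular tend to keep their capitalization), so noun chunks differing only in case could previously land under different keys in nouns. An illustrative sketch, not this repository's code; the model name is an assumption:

# Sketch: why lowercasing the joined lemmas merges case variants into one key.
# Assumes an English model is installed: python -m spacy download en_core_web_sm
import spacy

nlp = spacy.load("en_core_web_sm")  # model name is an assumption

for chunk in ("Mighty Nein", "mighty nein"):
    raw = "|".join(token.lemma_ for token in nlp(chunk))
    print(chunk, "->", raw, "->", raw.lower())
# Without .lower() the two variants can become separate dictionary keys;
# with it they collapse into one.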

View file

@@ -28,7 +28,7 @@ export interface Line {
"starttime": number;
"endtime": number;
"text": string;
"person": Person;
"person": Person | null;
"episode": Episode;
}

View file

@@ -247,7 +247,10 @@ export default Vue.extend({
} else if (line.isnote) {
return "purple";
}
return line.person.color;
if (line.person) {
return line.person.color;
}
return "white";
},
doYtOptIn() {
this.showYtOptIn = false;