Appelman, N., Fahy, R., Quintais, J. Using Terms and Conditions to Apply Fundamental Rights to Content Moderation In: German Law Journal, 2023. @article{Quintais2023,
title = {Using Terms and Conditions to Apply Fundamental Rights to Content Moderation},
author = {Quintais, J. and Appelman, N. and Fahy, R.},
doi = {10.1017/glj.2023.53},
year = {2023},
date = {2023-07-11},
journal = {German Law Journal},
abstract = {Large online platforms provide an unprecedented means for exercising freedom of expression online and wield enormous power over public participation in the online democratic space. However, it is increasingly clear that their systems, where (automated) content moderation decisions are taken based on a platform's terms and conditions (T\&Cs), are fundamentally broken. Content moderation systems have been said to undermine freedom of expression, especially where important public interest speech ends up suppressed, such as speech by minority and marginalized groups. Indeed, these content moderation systems have been criticized for their overly vague rules of operation, inconsistent enforcement, and an overdependence on automation. Therefore, in order to better protect freedom of expression online, international human rights bodies and civil society organizations have argued that platforms “should incorporate directly” principles of fundamental rights law into their T\&Cs. Under EU law, and apart from a rule in the Terrorist Content Regulation, platforms had until recently no explicit obligation to incorporate fundamental rights into their T\&Cs. However, an important provision in the Digital Services Act (DSA) will change this. Crucially, Article 14 DSA lays down new rules on how platforms can enforce their T\&Cs, including that platforms must have “due regard” to the “fundamental rights” of users under the EU Charter of Fundamental Rights. In this article, we critically examine the topic of enforceability of fundamental rights via T\&Cs through the prism of Article 14 DSA. We ask whether this provision requires platforms to apply EU fundamental rights law and to what extent this may curb the power of Big Tech over online speech. We conclude that Article 14 will make it possible, in principle, to establish the indirect horizontal effect of fundamental rights in the relationship between online platforms and their users. But in order for the application and enforcement of T\&Cs to take due regard of fundamental rights, Article 14 must be operationalized within the framework of the international and European fundamental rights standards. If this is possible, Article 14 may fulfil its revolutionary potential.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Large online platforms provide an unprecedented means for exercising freedom of expression online and wield enormous power over public participation in the online democratic space. However, it is increasingly clear that their systems, where (automated) content moderation decisions are taken based on a platform's terms and conditions (T&Cs), are fundamentally broken. Content moderation systems have been said to undermine freedom of expression, especially where important public interest speech ends up suppressed, such as speech by minority and marginalized groups. Indeed, these content moderation systems have been criticized for their overly vague rules of operation, inconsistent enforcement, and an overdependence on automation. Therefore, in order to better protect freedom of expression online, international human rights bodies and civil society organizations have argued that platforms “should incorporate directly” principles of fundamental rights law into their T&Cs. Under EU law, and apart from a rule in the Terrorist Content Regulation, platforms had until recently no explicit obligation to incorporate fundamental rights into their T&Cs. However, an important provision in the Digital Services Act (DSA) will change this. Crucially, Article 14 DSA lays down new rules on how platforms can enforce their T&Cs, including that platforms must have “due regard” to the “fundamental rights” of users under the EU Charter of Fundamental Rights. In this article, we critically examine the topic of enforceability of fundamental rights via T&Cs through the prism of Article 14 DSA. We ask whether this provision requires platforms to apply EU fundamental rights law and to what extent this may curb the power of Big Tech over online speech. We conclude that Article 14 will make it possible, in principle, to establish the indirect horizontal effect of fundamental rights in the relationship between online platforms and their users. But in order for the application and enforcement of T&Cs to take due regard of fundamental rights, Article 14 must be operationalized within the framework of the international and European fundamental rights standards. If this is possible, Article 14 may fulfil its revolutionary potential.
Appelman, N., Buri, I., Fahy, R., Quintais, J., Straub, M., van Hoboken, J. Putting the DSA into Practice: Enforcement, Access to Justice and Global Implications 2023, ISBN: 9783757517960. @techreport{vanHoboken2023,
title = {Putting the DSA into Practice: Enforcement, Access to Justice and Global Implications},
author = {van Hoboken, J. and Quintais, J. and Appelman, N. and Fahy, R. and Buri, I. and Straub, M.},
url = {https://www.ivir.nl/vhoboken-et-al_putting-the-dsa-into-practice/
https://verfassungsblog.de/wp-content/uploads/2023/02/vHoboken-et-al_Putting-the-DSA-into-Practice.pdf},
doi = {10.17176/20230208-093135-0},
isbn = {9783757517960},
year = {2023},
date = {2023-02-17},
urldate = {2023-02-17},
publisher = {Verfassungsbooks},
abstract = {The Digital Services Act was finally published in the Official Journal of the European Union on 27 October 2022. This publication marks the end of a years-long drafting and negotiation process, and opens a new chapter: that of its enforcement, practicable access to justice, and potential to set global precedents. The Act has been portrayed as Europe’s new “Digital Constitution”, which affirms the primacy of democratic rulemaking over the private transnational ordering mechanisms of Big Tech. With it, the European Union aims once again to set a global standard in the regulation of the digital environment. But will the Digital Services Act be able to live up to its expectations, and under what conditions?},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
The Digital Services Act was finally published in the Official Journal of the European Union on 27 October 2022. This publication marks the end of a years-long drafting and negotiation process, and opens a new chapter: that of its enforcement, practicable access to justice, and potential to set global precedents. The Act has been portrayed as Europe’s new “Digital Constitution”, which affirms the primacy of democratic rulemaking over the private transnational ordering mechanisms of Big Tech. With it, the European Union aims once again to set a global standard in the regulation of the digital environment. But will the Digital Services Act be able to live up to its expectations, and under what conditions?
Appelman, N., Buri, I., Fahy, R., Quintais, J., Straub, M., van Hoboken, J. The DSA has been published – now the difficult bit begins In: Verfassungsblog, 2022. @article{vanHoboken2022,
title = {The DSA has been published \textendash now the difficult bit begins},
author = {van Hoboken, J. and Buri, I. and Quintais, J. and Fahy, R. and Appelman, N. and Straub, M.},
url = {https://verfassungsblog.de/dsa-published/},
doi = {10.17176/20221031-095722-0},
year = {2022},
date = {2022-10-31},
urldate = {2022-10-31},
journal = {Verfassungsblog},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Appelman, N., Fahy, R., van Hoboken, J. Social Welfare, Risk Profiling and Fundamental Rights: The Case of SyRI in the Netherlands In: JIPITEC, vol. 12, no. 4, pp. 257-271, 2021. @article{Appelman2021b,
title = {Social Welfare, Risk Profiling and Fundamental Rights: The Case of SyRI in the Netherlands},
author = {Appelman, N. and Fahy, R. and van Hoboken, J.},
url = {https://www.ivir.nl/publicaties/download/jipitec_2021_4.pdf
https://www.jipitec.eu/issues/jipitec-12-4-2021/5407},
year = {2021},
date = {2021-12-16},
journal = {JIPITEC},
volume = {12},
number = {4},
pages = {257-271},
abstract = {This article discusses the use of automated decision-making (ADM) systems by public administrative bodies, particularly systems designed to combat social-welfare fraud, from a European fundamental rights law perspective. The article begins by outlining the emerging fundamental rights issues in relation to ADM systems used by public administrative bodies. Building upon this, the article critically analyses a recent landmark judgment from the Netherlands and uses it as a case study for discussing the application of fundamental rights law to ADM systems used by public authorities more generally. In the so-called SyRI judgment, the District Court of The Hague held that a controversial automated welfare-fraud detection system (SyRI), which allows the linking and analysing of data from an array of government agencies to generate fraud-risk reports on people, violated the right to private life, guaranteed under Article 8 of the European Convention on Human Rights (ECHR). The Court held that SyRI was insufficiently transparent, and contained insufficient safeguards, to protect the right to privacy, in violation of Article 8 ECHR. This was one of the first times that an ADM system used by welfare authorities was halted on the basis of Article 8 ECHR. The article critically analyses the SyRI judgment from a fundamental rights perspective, including by examining how the Court brought principles contained in the General Data Protection Regulation within the rubric of Article 8 ECHR, as well as the importance the Court attaches to the principle of transparency under Article 8 ECHR. Finally, the article discusses how the Dutch government responded to the judgment and examines proposed new legislation, which is arguably more invasive, concluding with some lessons that can be drawn for the broader policy and legal debate on ADM systems used by public authorities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
This article discusses the use of automated decision-making (ADM) systems by public administrative bodies, particularly systems designed to combat social-welfare fraud, from a European fundamental rights law perspective. The article begins by outlining the emerging fundamental rights issues in relation to ADM systems used by public administrative bodies. Building upon this, the article critically analyses a recent landmark judgment from the Netherlands and uses it as a case study for discussing the application of fundamental rights law to ADM systems used by public authorities more generally. In the so-called SyRI judgment, the District Court of The Hague held that a controversial automated welfare-fraud detection system (SyRI), which allows the linking and analysing of data from an array of government agencies to generate fraud-risk reports on people, violated the right to private life, guaranteed under Article 8 of the European Convention on Human Rights (ECHR). The Court held that SyRI was insufficiently transparent, and contained insufficient safeguards, to protect the right to privacy, in violation of Article 8 ECHR. This was one of the first times that an ADM system used by welfare authorities was halted on the basis of Article 8 ECHR. The article critically analyses the SyRI judgment from a fundamental rights perspective, including by examining how the Court brought principles contained in the General Data Protection Regulation within the rubric of Article 8 ECHR, as well as the importance the Court attaches to the principle of transparency under Article 8 ECHR. Finally, the article discusses how the Dutch government responded to the judgment and examines proposed new legislation, which is arguably more invasive, concluding with some lessons that can be drawn for the broader policy and legal debate on ADM systems used by public authorities.
Appelman, N., Fahy, R., Helberger, N. The perils of legally defining disinformation In: Internet Policy Review, vol. 10, no. 4, 2021. @article{Fahy2021,
title = {The perils of legally defining disinformation},
author = {Fahy, R. and Helberger, N. and Appelman, N.},
url = {https://www.ivir.nl/publicaties/download/InternetPolicyReview_2021.pdf},
doi = {10.14763/2021.4.1584},
year = {2021},
date = {2021-11-12},
journal = {Internet Policy Review},
volume = {10},
number = {4},
abstract = {EU policy considers disinformation to be harmful content, rather than illegal content. However, EU member states have recently been making disinformation illegal. This article discusses the definitions that form the basis of EU disinformation policy, and analyses national legislation in EU member states applicable to the definitions of disinformation, in light of freedom of expression and the proposed Digital Services Act. The article discusses the perils of defining disinformation in EU legislation, and of provisions requiring online platforms to remove illegal content, which may end up applying to content made illegal under overbroad national laws criminalising false news and false information.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
EU policy considers disinformation to be harmful content, rather than illegal content. However, EU member states have recently been making disinformation illegal. This article discusses the definitions that form the basis of EU disinformation policy, and analyses national legislation in EU member states applicable to the definitions of disinformation, in light of freedom of expression and the proposed Digital Services Act. The article discusses the perils of defining disinformation in EU legislation, and of provisions requiring online platforms to remove illegal content, which may end up applying to content made illegal under overbroad national laws criminalising false news and false information.
Appelman, N., Fahy, R., Quintais, J. Using Terms and Conditions to apply Fundamental Rights to Content Moderation: Is Article 12 DSA a Paper Tiger? Verfassungsblog, 2021. @online{Appelman2021,
title = {Using Terms and Conditions to apply Fundamental Rights to Content Moderation: Is Article 12 DSA a Paper Tiger?},
author = {Appelman, N. and Quintais, J. and Fahy, R.},
editor = {Verfassungsblog},
url = {https://verfassungsblog.de/power-dsa-dma-06/},
doi = {10.17176/20210901-233103-0},
year = {2021},
date = {2021-09-01},
organization = {Verfassungsblog},
keywords = {},
pubstate = {published},
tppubtype = {online}
}
Appelman, N., Fahy, R., Quintais, J. Article 12 DSA: Will platforms be required to apply EU fundamental rights in content moderation decisions? DSA Observatory 2021. @online{Quintais2021f,
title = {Article 12 DSA: Will platforms be required to apply EU fundamental rights in content moderation decisions?},
author = {Quintais, J. and Appelman, N. and Fahy, R.},
url = {https://dsa-observatory.eu/2021/05/31/article-12-dsa-will-platforms-be-required-to-apply-eu-fundamental-rights-in-content-moderation-decisions/},
year = {2021},
date = {2021-05-31},
organization = {DSA Observatory},
keywords = {},
pubstate = {published},
tppubtype = {online}
}
Appelman, N., Blom, T., van Duin, A., Fahy, R., Helberger, N., Steel, M., Stringhi, E., van Hoboken, J., Zarouali, B. WODC-onderzoek: Voorziening voor verzoeken tot snelle verwijdering van onrechtmatige online content 2020. @techreport{vanHoboken2020d,
title = {WODC-onderzoek: Voorziening voor verzoeken tot snelle verwijdering van onrechtmatige online content},
author = {van Hoboken, J. and Appelman, N. and van Duin, A. and Blom, T. and Zarouali, B. and Fahy, R. and Steel, M. and Stringhi, E. and Helberger, N.},
url = {https://www.ivir.nl/publicaties/download/WODC_voorziening_onrechtmatige_content.pdf},
year = {2020},
date = {2020-11-12},
abstract = {This study was published as part of the Minister for Legal Protection's priority programme to improve the position of victims of unlawful online expression. It responds to the finding that people experience it as too difficult to have unlawful online content removed quickly. The report assesses the legal and practical feasibility of a mechanism for the removal of unlawful online content that affects people personally. Unlawful content is information placed on the internet by people that is contrary to the law because of its harmful consequences and/or because it seriously harms the interests of others; examples include threats, privacy violations, and revenge pornography. The aim of the mechanism under study is to enable people to have such unlawful online content removed as quickly as possible. The study focuses on unlawful online content that affects people personally and therefore falls under the right to private life in Article 8 of the European Convention on Human Rights (“ECHR”).},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
This study was published as part of the Minister for Legal Protection's priority programme to improve the position of victims of unlawful online expression. It responds to the finding that people experience it as too difficult to have unlawful online content removed quickly. The report assesses the legal and practical feasibility of a mechanism for the removal of unlawful online content that affects people personally. Unlawful content is information placed on the internet by people that is contrary to the law because of its harmful consequences and/or because it seriously harms the interests of others; examples include threats, privacy violations, and revenge pornography. The aim of the mechanism under study is to enable people to have such unlawful online content removed as quickly as possible. The study focuses on unlawful online content that affects people personally and therefore falls under the right to private life in Article 8 of the European Convention on Human Rights (“ECHR”).
Appelman, N., Ausloos, J., Drunen, M. van, Helberger, N. News Recommenders and Cooperative Explainability: Confronting the contextual complexity in AI explanations 2020. @techreport{Drunen2020b,
title = {News Recommenders and Cooperative Explainability: Confronting the contextual complexity in AI explanations},
author = {Drunen, M. van and Ausloos, J. and Appelman, N. and Helberger, N.},
url = {https://www.ivir.nl/publicaties/download/Visiepaper-explainable-AI-final.pdf},
year = {2020},
date = {2020-11-03},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Appelman, N., Fahy, R. Netherlands/Research In: pp. 164-175, 2020, (Chapter in: Automating Society Report 2020, Chiusi, F., Fischer, S., Kayser-Bril, N. & Spielkamp, M. eds., Berlin: AlgorithmWatch, October 2020.). @inbook{Fahy2020b,
title = {Netherlands/Research},
author = {Fahy, R. and Appelman, N.},
url = {https://www.ivir.nl/publicaties/download/Automating-Society-Report-2020.pdf
https://automatingsociety.algorithmwatch.org/},
year = {2020},
date = {2020-10-29},
pages = {164-175},
abstract = {How are AI-based systems being used by private companies and public authorities in Europe? The new report by AlgorithmWatch and Bertelsmann Stiftung sheds light on what role automated decision-making (ADM) systems play in our lives. The result of the most comprehensive research on the issue conducted in Europe so far, the report covers the current use of, and policy debates around, ADM systems in 16 European countries and at EU level.},
note = {Chapter in: Automating Society Report 2020, Chiusi, F., Fischer, S., Kayser-Bril, N. \& Spielkamp, M. eds., Berlin: AlgorithmWatch, October 2020.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
How are AI-based systems being used by private companies and public authorities in Europe? The new report by AlgorithmWatch and Bertelsmann Stiftung sheds light on what role automated decision-making (ADM) systems play in our lives. The result of the most comprehensive research on the issue conducted in Europe so far, the report covers the current use of, and policy debates around, ADM systems in 16 European countries and at EU level.
Appelman, N., Fahy, R. Netherlands In: 2020, (Chapter in: F. Chiusi, S. Fischer, & M. Spielkamp (eds.), Automated Decision-Making Systems in the COVID-19 Pandemic: A European Perspective, AlgorithmWatch, 2020). @inbook{Appelman2020b,
title = {Netherlands},
author = {Appelman, N. and Fahy, R.},
url = {https://algorithmwatch.org/wp-content/uploads/2020/08/ADM-systems-in-the-Covid-19-pandemic-Report-by-AW-BSt-Sept-2020.pdf},
year = {2020},
date = {2020-09-01},
abstract = {Contact tracing apps for smartphones, thermal scanners, face recognition technology: high hopes have been placed by both local administrations and national governments in applications and devices like these, aimed at containing the outbreak of the virus. The new publication Automated Decision-Making Systems in the COVID-19 Pandemic: A European Perspective gathers detailed examples of ADM systems in use, compiled by a network of researchers covering 16 countries. It provides an initial mapping and exploration of ADM systems implemented throughout Europe as a consequence of the COVID-19 outbreak.},
note = {Chapter in: F. Chiusi, S. Fischer, \& M. Spielkamp (eds.), Automated Decision-Making Systems in the COVID-19 Pandemic: A European Perspective, AlgorithmWatch, 2020},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Contact tracing apps for smartphones, thermal scanners, face recognition technology: high hopes have been placed by both local administrations and national governments in applications and devices like these, aimed at containing the outbreak of the virus. The new publication Automated Decision-Making Systems in the COVID-19 Pandemic: A European Perspective gathers detailed examples of ADM systems in use, compiled by a network of researchers covering 16 countries. It provides an initial mapping and exploration of ADM systems implemented throughout Europe as a consequence of the COVID-19 outbreak.
Appelman, N., Fahy, R., Toh, J., van Hoboken, J. Techno-optimism and solutionism as a crisis response In: 2020, (Chapter in: L. Taylor, G. Sharma, A. Martin, and S. Jameson (eds.), Data Justice and COVID-19: Global Perspectives, Meatspace Press, 2020). @inbook{Appelman2020,
title = {Techno-optimism and solutionism as a crisis response},
author = {Appelman, N. and Toh, J. and Fahy, R. and van Hoboken, J.},
url = {https://pure.uva.nl/admin/files/49662485/Data_Justice_and_COVID_19.pdf},
year = {2020},
date = {2020-08-27},
abstract = {The COVID-19 pandemic has reshaped how social, economic, and political power is created, exerted, and extended through technology. Through case studies from around the world, this book analyses the ways in which technologies of monitoring infections, information, and behaviour have been applied and justified during the emergency, what their side-effects have been, and what kinds of resistance they have met.},
note = {Chapter in: L. Taylor, G. Sharma, A. Martin, and S. Jameson (eds.), Data Justice and COVID-19: Global Perspectives, Meatspace Press, 2020},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
The COVID-19 pandemic has reshaped how social, economic, and political power is created, exerted, and extended through technology. Through case studies from around the world, this book analyses the ways in which technologies of monitoring infections, information, and behaviour have been applied and justified during the emergency, what their side-effects have been, and what kinds of resistance they have met.
Appelman, N., Fahy, R., Helberger, N., Leerssen, P., McGonagle, T., van Eijk, N., van Hoboken, J. Het juridisch kader voor de verspreiding van desinformatie via internetdiensten en de regulering van politieke advertenties 2020, (Report for the Ministry of the Interior and Kingdom Relations, Amsterdam, December 2019). @techreport{vanHoboken2020b,
title = {Het juridisch kader voor de verspreiding van desinformatie via internetdiensten en de regulering van politieke advertenties},
author = {van Hoboken, J. and Appelman, N. and Fahy, R. and Leerssen, P. and McGonagle, T. and van Eijk, N. and Helberger, N.},
url = {https://www.ivir.nl/publicaties/download/Rapport_desinformatie_december2019.pdf
https://www.ivir.nl/publicaties/download/Kamerbrief_desinformatie.pdf},
year = {2020},
date = {2020-05-14},
abstract = {The study, commissioned by the Ministry of the Interior and Kingdom Relations, analyses the legal framework applicable to the dissemination of disinformation through online services. The report provides an extensive overview of the relevant European and Dutch norms and makes recommendations for improving this legal framework. The study also includes an analysis of the relevant legal frameworks in the U.S., the U.K., France, Germany, Canada, and Sweden.
The report makes clear how freedom of expression runs as a central thread through the legal framework. This fundamental right forms both the outer limit for regulation and a basis for new measures, for example to protect pluralism. The legal framework applicable to disinformation proves to be very broad, comprises several levels of regulation, shifts depending on the specific context, and includes many existing norms regulating specific types of disinformation. Oversight of this legal framework, moreover, appears rather fragmented. On the basis of this analysis, the report makes a number of recommendations, concerning, among other things, the use of disinformation as a policy term, the handling of tensions between the different policy levels, the regulation of internet intermediaries through transparency obligations, and cooperation between the various supervisory authorities.
The interim report preceding this final report was published in late 2019 and focused on the relationship between disinformation and online political advertising. Both studies are part of the research project ‘Digital Transition of Decision-Making’ at the Faculty of Law of the University of Amsterdam, which addresses questions related to artificial intelligence and public values, data governance, and online platforms.},
note = {Report for the Ministry of the Interior and Kingdom Relations, Amsterdam, December 2019},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
The study, commissioned by the Ministry of the Interior and Kingdom Relations, analyses the legal framework applicable to the dissemination of disinformation through online services. The report provides an extensive overview of the relevant European and Dutch norms and makes recommendations for improving this legal framework. The study also includes an analysis of the relevant legal frameworks in the U.S., the U.K., France, Germany, Canada, and Sweden.
The report makes clear how freedom of expression runs as a central thread through the legal framework. This fundamental right forms both the outer limit for regulation and a basis for new measures, for example to protect pluralism. The legal framework applicable to disinformation proves to be very broad, comprises several levels of regulation, shifts depending on the specific context, and includes many existing norms regulating specific types of disinformation. Oversight of this legal framework, moreover, appears rather fragmented. On the basis of this analysis, the report makes a number of recommendations, concerning, among other things, the use of disinformation as a policy term, the handling of tensions between the different policy levels, the regulation of internet intermediaries through transparency obligations, and cooperation between the various supervisory authorities.
The interim report preceding this final report was published in late 2019 and focused on the relationship between disinformation and online political advertising. Both studies are part of the research project ‘Digital Transition of Decision-Making’ at the Faculty of Law of the University of Amsterdam, which addresses questions related to artificial intelligence and public values, data governance, and online platforms.
Appelman, N., Fahy, R., Helberger, N., Leerssen, P., McGonagle, T., van Eijk, N., van Hoboken, J. The legal framework on the dissemination of disinformation through Internet services and the regulation of political advertising 2020, (A report for the Ministry of the Interior and Kingdom Relations, Amsterdam, December 2019). @techreport{vanHoboken2020c,
title = {The legal framework on the dissemination of disinformation through Internet services and the regulation of political advertising},
author = {van Hoboken, J. and Appelman, N. and Fahy, R. and Leerssen, P. and McGonagle, T. and van Eijk, N. and Helberger, N.},
url = {https://www.ivir.nl/publicaties/download/Report_Disinformation_Dec2019-1.pdf},
year = {2020},
date = {2020-05-14},
abstract = {The study, commissioned by the Dutch government, focusses on the legal framework governing the dissemination of disinformation, in particular through Internet services. The study provides an extensive overview of relevant European and Dutch legal norms relating to the spread of online disinformation, and recommendations are given on how to improve this framework. Additionally, the study includes an analysis of the relevant legal framework in 6 different countries (U.K., U.S., France, Germany, Sweden and Canada).
The report makes clear how freedom of expression runs as a central theme through the legal framework, forming both the outer limit for possible regulation and a legal basis for creating new regulation (e.g., protecting pluralism). The legal framework governing disinformation online is shown to be very broad, encompassing different levels of regulation, shifting depending on the context, and already regulating many different types of disinformation. Further, oversight appears fragmented, with many different supervisory authorities involved but limited cooperation between them. Based on this analysis, the report offers several recommendations, such as treating disinformation as a policy term rather than a legal term, negotiating the tensions between the different policy levels, regulating internet intermediaries through transparency obligations, and increasing cooperation between the relevant supervisory authorities.
Previously, the interim report focussing on political advertising was published in late 2019. Both these studies have been carried out in the context of the research initiative on the Digital Transition of Decision-Making at the Faculty of Law of the University of Amsterdam, focussing on questions related to AI and public values, data governance and online platforms.},
note = {A report for the Ministry of the Interior and Kingdom Relations, Amsterdam, December 2019},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
The study, commissioned by the Dutch government, focusses on the legal framework governing the dissemination of disinformation, in particular through Internet services. The study provides an extensive overview of relevant European and Dutch legal norms relating to the spread of online disinformation, and recommendations are given on how to improve this framework. Additionally, the study includes an analysis of the relevant legal framework in 6 different countries (U.K., U.S., France, Germany, Sweden and Canada).
The report makes clear how freedom of expression runs as a central theme through the legal framework, forming both the outer limit for possible regulation and a legal basis for creating new regulation (e.g., protecting pluralism). The legal framework governing disinformation online is shown to be very broad, encompassing different levels of regulation, shifting depending on the context, and already regulating many different types of disinformation. Further, oversight appears fragmented, with many different supervisory authorities involved but limited cooperation between them. Based on this analysis, the report offers several recommendations, such as treating disinformation as a policy term rather than a legal term, negotiating the tensions between the different policy levels, regulating internet intermediaries through transparency obligations, and increasing cooperation between the relevant supervisory authorities.
Previously, the interim report focussing on political advertising was published in late 2019. Both these studies have been carried out in the context of the research initiative on the Digital Transition of Decision-Making at the Faculty of Law of the University of Amsterdam, focussing on questions related to AI and public values, data governance and online platforms.
Appelman, N., Bodó, B., Schwichow, H. von Money talks? The impact of corporate funding on information law research 2020, (Amsterdam: IViR, Berlin: European Hub of the NoC). @techreport{Bodo2020,
title = {Money talks? The impact of corporate funding on information law research},
author = {Bod\'{o}, B. and Schwichow, H. von and Appelman, N.},
url = {https://www.ivir.nl/publicaties/download/money-talks-summary-report-final.pdf},
year = {2020},
date = {2020-05-07},
abstract = {Corporate funding is a contentious issue in information law and policy research. In the fall of 2019, the Institute for Information Law at the University of Amsterdam and the European Hub of the Network of Centers invited academic research institutions, as well as junior and senior scholars, to reflect on the issues around corporate influence on research through money, data, infrastructure, and access. The discussion arrived at a number of important conclusions:
- The discussion on funding must include data, infrastructure deals, and other forms of indirect funding.
- Sometimes corporate funding is the only way to get access to critical resources.
- Transparency is a must, but not a silver bullet for dealing with funding.
- It is difficult to set up universal a priori norms about which types of funding are acceptable in which situations.
- Academia may need new institutional solutions to review funding and manage the potential risks of funders taking over the agenda, research bias, and reputational harm.
- Public funding bodies are as much part of the problem as of the solution.
The rapid but consequential shifts in the digital landscape, in terms of technological innovation, dominant economic actors, power relations, and social and political structures, are transforming the environment of the academic research that aims to address the legal and policy issues around those changes. More and more issues, such as content moderation, intermediary liability, digital advertising, algorithmic discrimination, and the accountability of AI systems, are framed as regulatory dilemmas. As a result, legal research is both in growing demand and has gained visibility and significance. As the future rules of the information society are shaped in discussions led, or at least prominently shaped, by information law research, the temptation to influence it also increases. Research institutions must acknowledge the shifting landscape and the growing stakes. Challenges at that scale require more than individual integrity: there is a need for institutional solutions that can, on the one hand, actively assess and mitigate the potential harms in each individual case and, on the other, actively shape the funding landscape and the norms around funding.},
note = {Amsterdam: IViR, Berlin: European Hub of the NoC},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Corporate funding is a contentious issue in information law and policy research. In the fall of 2019, the Institute for Information Law at the University of Amsterdam and the European Hub of the Network of Centers invited academic research institutions, as well as junior and senior scholars, to reflect on the issues around corporate influence on research through money, data, infrastructure, and access. The discussion arrived at a number of important conclusions:
- The discussion on funding must include data, infrastructure deals, and other forms of indirect funding.
- Sometimes corporate funding is the only way to get access to critical resources.
- Transparency is a must, but not a silver bullet for dealing with funding.
- It is difficult to set up universal a priori norms about which types of funding are acceptable in which situations.
- Academia may need new institutional solutions to review funding and manage the potential risks of funders taking over the agenda, research bias, and reputational harm.
- Public funding bodies are as much part of the problem as of the solution.
The rapid but consequential shifts in the digital landscape, in terms of technological innovation, dominant economic actors, power relations, and social and political structures, are transforming the environment of the academic research that aims to address the legal and policy issues around those changes. More and more issues, such as content moderation, intermediary liability, digital advertising, algorithmic discrimination, and the accountability of AI systems, are framed as regulatory dilemmas. As a result, legal research is both in growing demand and has gained visibility and significance. As the future rules of the information society are shaped in discussions led, or at least prominently shaped, by information law research, the temptation to influence it also increases. Research institutions must acknowledge the shifting landscape and the growing stakes. Challenges at that scale require more than individual integrity: there is a need for institutional solutions that can, on the one hand, actively assess and mitigate the potential harms in each individual case and, on the other, actively shape the funding landscape and the norms around funding.
Appelman, N., Fahy, R., Helberger, N., Leerssen, P., McGonagle, T., van Eijk, N., van Hoboken, J. De verspreiding van desinformatie via internetdiensten en de regulering van politieke advertenties 2019, (Interim report, October 2019). @techreport{vanHoboken2019c,
title = {De verspreiding van desinformatie via internetdiensten en de regulering van politieke advertenties},
author = {van Hoboken, J. and Appelman, N. and Fahy, R. and Leerssen, P. and McGonagle, T. and van Eijk, N. and Helberger, N.},
url = {https://www.ivir.nl/publicaties/download/verspreiding_desinformatie_internetdiensten_tussenrapportage.pdf},
year = {2019},
date = {2019-10-31},
abstract = {Report commissioned by the Ministry of the Interior and Kingdom Relations; annex to Kamerstuk 2019-2020, 30821, nr. 91, Tweede Kamer.},
note = {Interim report, October 2019},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Report commissioned by the Ministry of the Interior and Kingdom Relations; annex to Kamerstuk 2019-2020, 30821, nr. 91, Tweede Kamer.