Bodó, B., van den Brink, J.E., Janssen, H., Meiring, A., Ortlep, R., van Eechoud, M., van Hoboken, J., Verboeket, L.W. Gemeentelijke grip op private sensorgegevens: Juridisch kader voor het gemeentelijke handelingsperspectief bij de verwerking van private sensorgegevens in de openbare ruimte, 2023. https://www.ivir.nl/publicaties/download/Gemeentelijke_grip_op_private_sensorgegevens.pdf (Research commissioned by the Municipality of Amsterdam).
Janssen, H., Seng Ah Lee, M., Singh, J. Practical fundamental rights impact assessments, In: International Journal of Law and Information Technology, vol. 30, no. 2, pp. 200-232, 2022. https://doi.org/10.1093/ijlit/eaac018
The European Union’s General Data Protection Regulation tasks organizations with performing a Data Protection Impact Assessment (DPIA) to consider the fundamental rights risks of their artificial intelligence (AI) systems. However, assessing risks can be challenging, as fundamental rights are often considered abstract in nature. So far, guidance regarding DPIAs has largely focussed on data protection, leaving broader fundamental rights aspects less elaborated. This is problematic because potential negative societal consequences of AI systems may remain unaddressed and damage public trust in organizations using AI. Towards this, we introduce a practical, four-phased framework, assisting organizations with performing fundamental rights impact assessments. This involves organizations (i) defining the system’s purposes and tasks, and the responsibilities of parties involved in the AI system; (ii) assessing the risks regarding the system’s development; (iii) justifying why the risks of potential infringements on rights are proportionate; and (iv) adopting organizational and/or technical measures mitigating the risks identified. We further indicate how regulators might support these processes with practical guidance.
Ausloos, J., Delacroix, S., Giannopoulou, A., Janssen, H. Intermediating data rights exercises: the role of legal mandates, In: International Data Privacy Law, vol. 12, no. 4, pp. 316-331, 2022. https://doi.org/10.1093/idpl/ipac017
Data subject rights constitute critical tools for empowerment in the digitized society. There is a growing trend of relying on third parties to facilitate or coordinate the collective exercises of data rights, on behalf of one or more data subjects.
This contribution refers to these parties as ‘Data Rights Intermediaries’ (DRIs), i.e. where an ‘intermediating’ party facilitates or enables the collective exercise of data rights. The exercise of data rights by these DRIs on behalf of the data subjects can only be effectuated with the help of mandates.
Data rights mandates are not expressly framed in the GDPR, and their delineation can be ambiguous. It is important to highlight that data rights are mandatable, and this without affecting their inalienability in light of their fundamental rights nature.
This article argues that contract law and fiduciary duties both have longstanding traditions and robust norms in many jurisdictions, all of which can be explored towards shaping the appropriate environment to regulate data rights mandates in particular.
The article concludes that the key to unlocking the full potential of data rights mandates can already be found in existing civil law constructs, whose diversity reveals the need for solidifying the responsibility and accountability of mandated DRIs. The continued adherence to fundamental contract law principles will have to be complemented by a robust framework of institutional safeguards. The need for such safeguards stems from the vulnerable position of data subjects, both vis-à-vis DRIs as well as data controllers.
Janssen, H. Opinie: Commerciële datakluizen lossen problemen met big tech niet op, In: De Volkskrant, 2022. https://archive.ph/dQqU3
To protect citizens from big tech’s immense hunger for control, power, and money, so-called ‘data vaults’ are increasingly being offered. But are the digital data we share from such a vault, typing and clicking, really any better off?
Janssen, H., Singh, J. Personal Information Management Systems, In: Internet Policy Review, vol. 11, no. 2, 2022. https://doi.org/10.14763/2022.2.1659 (Glossary of decentralised technosocial systems).
Personal Information Management Systems (PIMS) seek to empower users by equipping them with mechanisms for mediating, monitoring and controlling how their data is accessed, used, or shared.
Janssen, H., Singh, J. Data intermediary, In: Internet Policy Review, vol. 11, no. 1, 2022. https://doi.org/10.14763/2022.1.1644 (Glossary of decentralised technosocial systems).
Data intermediaries serve as mediators between those who wish to make their data available and those who seek to leverage that data. The intermediary works to govern the data in specific ways, and provides some degree of confidence regarding how the data will be used.
Cobbe, J., Janssen, H., Seng Ah Lee, M., Singh, J. Defining the scope of AI ADM system risk assessment, In: Research Handbook on EU Data Protection Law, E. Kosta, R. Leenes & I. Kamara (eds.), Chapter 16, pp. 405-434, Edward Elgar Publishing, 2022.
Bodó, B., Janssen, H. Maintaining trust in a technologized public sector, In: Policy and Society, 2022. https://doi.org/10.1093/polsoc/puac019
Emerging technologies permeate and potentially disrupt a wide spectrum of our social, economic, and political relations. Various state institutions, including education, law enforcement, and healthcare, increasingly rely on technical components, such as automated decision-making systems, e-government systems, and other digital tools to provide cheap, efficient public services, and supposedly fair, transparent, disinterested, and accountable public administration. The increased interest in various blockchain-based solutions from central bank digital currencies, via tokenized educational credentials, and distributed ledger-based land registries to self-sovereign identities is the latest, still mostly unwritten chapter in a long history of standardized, objectified, automated, technocratic, and technologized public administration. The rapid, (often) unplanned, and uncontrolled technologization of public services (as happened in the hasty adoption of distance-learning and teleconferencing systems during Corona Virus Disease (COVID) lockdowns) raises complex questions about the use of novel technological components, which may or may not be ultimately adequate for the task for which they are used. The question whether we can trust the technical infrastructures the public sector uses when providing public services is a central concern in an age where trust in government is declining: If the government’s artificial intelligence system that detects welfare fraud fails, the public’s confidence in the government is ultimately hit. In this paper, we provide a critical assessment of how the use of potentially untrustworthy (private) technological systems including blockchain-based systems in the public sector may affect trust in government. We then propose several policy options to protect the trust in government even if some of their technological components prove fundamentally untrustworthy.
Janssen, H. De toekomst van de digitale rechtsstaat. Pleidooi voor het gebruik van een mensenrechten impact assessment voor de publieke sector, In: (L)aw Matters: Blogs and Essays in Honour of prof. dr. Aalt Willem Heringa, Chapter 34, pp. 198-204, Boekenmaker, 2022. https://www.globalacademicpress.com/ebooks/sascha_hardt/
Janssen, H. Persoonlijke PIMS: privacyfort of luchtkasteel?, In: Privacy & Informatie, no. 5, pp. 214-225, 2021.
Personal data are nowadays often processed in opaque ways, beyond the control of the data subjects concerned. Personal information management systems (PIMS) aim to provide data subjects with technological tools that give them more control over the processing of their personal data. PIMS present themselves as an alternative to the current, ‘centralised’ mode of data processing, in which (large) organisations collect, analyse, and pass personal data on to third parties in mostly opaque ways. PIMS offer data subjects technical instruments with which they themselves can monitor and determine when and to whom their data are transferred, and/or have analyses performed on their data. Although the arguments for this ‘decentralisation’ sound attractive, questions arise as to how effectively PIMS can combat the problems of current data processing. This article focuses on the extent to which PIMS can actually counter the power asymmetry between data subjects and large organisations that has arisen from current data processing practices. PIMS can offer some insight into and control over data processing, but the power asymmetry will nevertheless largely persist.
Bodó, B., Giannopoulou, A., Irion, K., Janssen, H. Personal data ordering in context: the interaction of meso-level data governance regimes with macro frameworks, In: Internet Policy Review, vol. 10, no. 3, 2021. https://doi.org/10.14763/2021.3.1581
The technological infrastructures enabling the collection, processing, and trading of data have fuelled a rapid innovation of data governance models. We differentiate between macro, meso, and micro level models, which correspond to major political blocks; societal-, industry-, or community level systems, and individual approaches, respectively. We focus on meso-level models, which coalesce around: (1) organisations prioritising their own interests over interests of other stakeholders; (2) organisations offering technological and legal tools aiming to empower individuals; (3) community-based data intermediaries fostering collective rights and interests. In this article we assess these meso-level models, and discuss their interaction with the macro-level legal frameworks that have evolved in the US, the EU, and China. The legal landscape has largely remained inconsistent and fragmented, with enforcement struggling to keep up with the latest developments. We argue, first, that the success of meso-logics is largely defined by global economic competition, and, second, that these meso-logics may potentially put the EU’s macro-level framework with its mixed internal market and fundamental rights-oriented model under pressure. We conclude that, given the relative absence of a strong macro-level framework and an intensive competition of governance models at meso-level, it may be challenging to avoid compromises to the European macro framework.
Cobbe, J., Janssen, H., Norval, C., Singh, J. Data protection and tech startups: The need for attention, support, and scrutiny, In: Policy & Internet, vol. 13, no. 2, pp. 278-299, 2021. https://doi.org/10.1002/poi3.255
Though discussions of data protection have focused on the larger, more established organisations, startups also warrant attention. This is particularly so for tech startups, who are often innovating at the ‘cutting-edge’—pushing the boundaries of technologies that typically lack established data protection best-practices. Initial decisions taken by startups could well have long-term impacts, and their actions may inform (for better or for worse) how particular technologies and the applications they support are implemented, deployed, and perceived for years to come. Ensuring that the innovations and practices of tech startups are sound, appropriate and acceptable should therefore be a high priority. This paper explores the attitudes and preparedness of tech startups to issues of data protection. We interviewed a series of UK-based emerging tech startups as the EU's General Data Protection Regulation (GDPR) came into effect, which revealed areas in which there is a disconnect between the approaches of the startups and the nature and requirements of the GDPR. We discuss the misconceptions and associated risks facing innovative tech startups and offer a number of considerations for the firms and supervisory authorities alike. In light of our discussions, and given what is at stake, we argue that more needs to be done to help ensure that emerging technologies and the practices of the companies that operate them better align with the regulatory obligations. We conclude that tech startups warrant increased attention, support, and scrutiny to raise the standard of data protection for the benefit of us all.
Cobbe, J., Janssen, H., Singh, J. Personal Data Stores: a user-centric privacy utopia?, In: Internet Policy Review, forthcoming.
Cobbe, J., Janssen, H., Norval, C., Singh, J. Decentralised Data Processing: Personal Data Stores and the GDPR, In: International Data Privacy Law, vol. 10, no. 4, pp. 356-384, 2021. https://doi.org/10.1093/idpl/ipaa016
When it comes to online services, users have limited control over how their personal data is processed. This is partly due to the nature of the business models of those services, where data is typically stored and aggregated in data centres. This has recently led to the development of technologies aiming at leveraging user control over the processing of their personal data.
Personal Data Stores (“PDSs”) represent a class of these technologies; PDSs provide users with a device, enabling them to capture, aggregate and manage their personal data. The device provides tools for users to control and monitor access, sharing and computation over data on their device. The motivations for PDSs are described as (i) to assist users with their confidentiality and privacy concerns, and/or (ii) to provide opportunities for users to transact with or otherwise monetise their data.
While PDSs might enable some degree of user empowerment, they raise interesting considerations and uncertainties in relation to the responsibilities under the General Data Protection Regulation (GDPR). More specifically, the designations of responsibilities among key parties involved in PDS ecosystems are unclear. Further, the technical architecture of PDSs appears to restrict certain lawful grounds for processing, while technical means to identify certain category data, as proposed by some, may remain theoretical.
We explore the considerations, uncertainties, and limitations of PDSs with respect to some key obligations under the GDPR. As PDS technologies continue to develop and proliferate, potentially providing an alternative to centralised approaches to data processing, we identify issues which require consideration by regulators, PDS platform providers and technologists.
Cobbe, J., Janssen, H., Seng Ah Lee, M., Singh, J. Centering the Law in the Digital State, In: Computer, vol. 53, no. 10, pp. 47-58, 2020. https://doi.org/10.1109/MC.2020.3006623
Driven by the promise of increased efficiencies and cost-savings, the public sector has shown much interest in automated decision-making (ADM) technologies. However, the rule of law and fundamental principles of good government are being lost along the way.
Janssen, H. An approach for a fundamental rights impact assessment to automated decision-making, In: International Data Privacy Law, vol. 10, no. 1, pp. 76-106, 2020. https://doi.org/10.1093/idpl/ipz028
Companies and other private institutions see great and promising profits in the use of automated decision-making (‘ADM’) for commercial, financial, or work-efficiency purposes. Meanwhile, ADM based on a data subject’s personal data may (severely) impact that subject’s fundamental rights and freedoms. The General Data Protection Regulation (GDPR) provides a regulatory framework that applies whenever a controller considers and deploys ADM onto individuals on the basis of their personal data. In the design stage of the intended ADM, article 35(3)(a) obliges a controller to carry out a Data Protection Impact Assessment (DPIA), part of which is an assessment of the ADM’s impact on individual rights and freedoms. Article 22 GDPR determines under what conditions ADM is allowed and endows data subjects with increased protection.
Research among companies of various sizes has shown that there is (legal) uncertainty about the interpretation of the GDPR (including the provisions relevant to ADM). The author’s first objective is to identify ways forward by offering practical handles for executing a DPIA that includes a slidable assessment of impacts on data subjects’ fundamental rights. This assessment is based on four benchmarks that should help to assess the gravity of potential impacts: (i) the impact on the fundamental right(s) at stake; (ii) the context in which the ADM is used; (iii) who benefits from the use of personal data in the ADM; and (iv) who is in control over the data flows in the ADM. From these benchmarks an overall fundamental rights impact assessment of the ADM should arise. A second objective is to indicate potential factors and measures that a controller should consider in its risk management after the assessment. The proposed approach should help foster fair, compliant and trustworthy ADM and contains directions for future research.
Cobbe, J., Janssen, H., Norval, C., Singh, J. Personal Data Stores and the GDPR's lawful grounds for processing personal data, working paper, 2019. https://doi.org/10.5281/zenodo.3234902
Personal Data Stores (‘PDSs’) entail users having a (physical or virtual) device within which they themselves can, in theory, capture, aggregate, and control the access to and the transfer of personal data. Their aim is to empower users in relation to their personal data, strengthening their opportunities for data protection, privacy, and/or to facilitate trade and monetisation. As PDS technologies develop, it is important to consider their role in relation to issues of data protection. The General Data Protection Regulation requires that the processing of user data be predicated on one of its defined lawful bases, whereby the Regulation does not favour any one basis over another. We explore how PDS architectures relate to these lawful bases, and observe that they tend to favour the bases that require direct user involvement. This paper considers issues that the envisaged architectural choices surrounding the lawful grounds may entail.
Janssen, H. Constitutionele Interpretatie. Een rechtsvergelijkend onderzoek naar de vaststelling van de reikwijdte van het recht op persoonlijkheid, Sdu, 2003, 495 pp. (PhD thesis, Maastricht University). https://cris.maastrichtuniversity.nl/ws/portalfiles/portal/38414297/1637229.pdf