<?xml version="1.0" encoding="utf-8" ?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns="http://purl.org/rss/1.0/">




    



<channel rdf:about="https://cis-india.org/search_rss">
  <title>Centre for Internet and Society</title>
  <link>https://cis-india.org</link>
  
  <description>
    
            These are the search results for the query, showing results 41 to 55.
        
  </description>
  
  
  
  
  <image rdf:resource="https://cis-india.org/logo.png"/>

  <items>
    <rdf:Seq>
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/ethical-data-design-practices-in-the-ai-artificial-intelligence-age"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/emerging-ai-technology-in-health-care-in-india-health-equity-and-justice-critical-reflections-and-charting-out-way-forward"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/emergence-of-chinese-technology-rising-stakes-for-innovation-competition-and-governance"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/deccan-herald-july-14-2019-rajmohan-sudhakar-deepfakes-algorithms-at-war-trust-at-stake"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/curating-genderlog-indias-twitter-handle"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/comments-on-niti-aayog-working-document-towards-responsible-aiforall"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/raw/cisxscholars-harsh-gupta-machine-learning-for-lawyers-and-lawmakers-20170629"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/cis-seminar-series"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/big-data-in-india-benefits-harms-and-human-rights-a-report"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/economic-times-anjali-venugopalan-june-4-2019-banking-on-artificial-intelligence"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/artificial-intelligence-a-full-spectrum-regulatory-challenge-working-draft"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi"/>
        
    </rdf:Seq>
  </items>

</channel>


    <item rdf:about="https://cis-india.org/internet-governance/news/ethical-data-design-practices-in-the-ai-artificial-intelligence-age">
    <title>Ethical Data Design Practices in the AI (Artificial Intelligence) Age</title>
    <link>https://cis-india.org/internet-governance/news/ethical-data-design-practices-in-the-ai-artificial-intelligence-age</link>
    <description>
        &lt;b&gt;Shweta Mohandas was a panelist at discussion on Ethical Data Design Practices in the AI (Artificial Intelligence) Age, organised by Startup Grind, Bangalore on July 28, 2018 at NUMA Bangalore. &lt;/b&gt;
        &lt;h2&gt;Agenda&lt;/h2&gt;
&lt;p&gt;&lt;b&gt;Ethical Data Design Practices in the Age&lt;/b&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;The panel discussion is intended to explore the challenges we face when designing the user experiences of the complex behavioral agents that increasingly run our lives.&lt;/p&gt;
&lt;p dir="ltr"&gt;Discussion centred around how to:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Understand current thinking by the AI community on ethics and morality in computing and the challenges it presents. &lt;/li&gt;
&lt;li&gt;Explore examples of the ethical choices that products make now and will make in the near future.&lt;/li&gt;
&lt;li&gt;Learn how designers might approach designing experiences that face moral dilemmas.&lt;/li&gt;
&lt;/ul&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/ethical-data-design-practices-in-the-ai-artificial-intelligence-age'&gt;https://cis-india.org/internet-governance/news/ethical-data-design-practices-in-the-ai-artificial-intelligence-age&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-08-01T23:14:21Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/emerging-ai-technology-in-health-care-in-india-health-equity-and-justice-critical-reflections-and-charting-out-way-forward">
    <title>Emerging AI technology in health care in India, health equity and justice: Critical reflections and charting out way forward</title>
    <link>https://cis-india.org/internet-governance/news/emerging-ai-technology-in-health-care-in-india-health-equity-and-justice-critical-reflections-and-charting-out-way-forward</link>
    <description>
        &lt;b&gt;On July 13, 2019, Radhika Radhakrishnan, participated in a roundtable discussion on "Emerging AI technology in health care in India, health equity and justice: Critical reflections and charting out way forward." The event was organized by HEaL (Health, Ethics, and Law Institute of Training, Research and Advocacy) of FMES (Forum for Medical Ethics Society) in collaboration with CPS (Centre for Policy Studies), Indian Institute of Technology-Bombay.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;Radhika chaired a session on the ethics of AI in healthcare in India,       and my main submissions included: the medicalization of and       experimentation on women's bodies under a medical-industrial       complex for the design of AI-based healthcare models, and FAT       (Fairness, Accountability, Transparency) concerns with AI. She was also invited to draft some of this content into a       paper submission to the &lt;a href="https://ijme.in/"&gt;Indian Journal of Medical Ethics&lt;/a&gt; which is a peer-reviewed and indexed academic journal run by FMES.&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/emerging-ai-technology-in-health-care-in-india-health-equity-and-justice-critical-reflections-and-charting-out-way-forward'&gt;https://cis-india.org/internet-governance/news/emerging-ai-technology-in-health-care-in-india-health-equity-and-justice-critical-reflections-and-charting-out-way-forward&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-07-21T15:47:27Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/emergence-of-chinese-technology-rising-stakes-for-innovation-competition-and-governance">
    <title>Emergence of Chinese Technology: Rising stakes for innovation, competition and governance</title>
    <link>https://cis-india.org/internet-governance/news/emergence-of-chinese-technology-rising-stakes-for-innovation-competition-and-governance</link>
    <description>
        &lt;b&gt;Omidyar Network in partnership with the Esya Centre organized a private discussion on the theme “Emergence of Chinese technology - rising stakes for innovation, competition and governance” on Monday, 12 August 2019 in New Delhi. Arindrajit Basu attended the event. &lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;China Ascendant: Soft Power report by ON focuses on three prongs of power-digital power, fore power and sharp power. Standards have been a major avenue for proliferation of Chinese competition.This is combined with knowledge transfer as 2.8 million Chinese students in the US have largely returned to tech companies in China. Core strength is still not in basic research so by 2020, aiming for 15 per cent of PhD.s to be in basic research. China uses nudges in shaping global governance outcomes by targeting the right stakeholders as opposed to altering the ground rules entirely,  Universities in China have focused on how cultural connections can be linked upto negotiating prowess at multilateral fora.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;China takes a whole of government approach to technology innovation. Continues to be consumer focused.&lt;/li&gt;
&lt;li&gt;China does not look at India as a R+D partner, more as a market. Stability and unpredictability have been an issue. None of India's tech policies were drafted with China in mind.&lt;/li&gt;
&lt;/ul&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/emergence-of-chinese-technology-rising-stakes-for-innovation-competition-and-governance'&gt;https://cis-india.org/internet-governance/news/emergence-of-chinese-technology-rising-stakes-for-innovation-competition-and-governance&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-08-19T14:03:21Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence">
    <title>Discrimination in the Age of Artificial Intelligence </title>
    <link>https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence</link>
    <description>
        &lt;b&gt;The dawn of Artificial Intelligence (AI) has been celebrated by both government and industry across the globe. AI offers the potential to augment many existing bureaucratic processes and improve human capacity, if implemented in accordance with principles of the rule of law and international human rights norms. Unfortunately, AI-powered solutions have often been implemented in ways that have resulted  in the automation, rather than mitigation, of existing societal inequalities.&lt;/b&gt;
        &lt;p&gt;This was originally published by &lt;a class="external-link" href="http://ohrh.law.ox.ac.uk/discrimination-in-the-age-of-artificial-intelligence/"&gt;Oxford Human Rights Hub&lt;/a&gt; on October 23, 2018&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;&lt;img src="https://cis-india.org/home-images/ArtificialIntelligence.jpg/@@images/3b551d39-e419-442c-8c9d-7916a2d39378.jpeg" alt="Artificial Intelligence" class="image-inline" title="Artificial Intelligence" /&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Image Credit: Sarla Catt via Flickr, used under a Creative Commons license available at https://creativecommons.org/licenses/by/2.0/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In the international human rights law context, AI solutions pose a  threat to norms which prohibit discrimination. International Human  Rights Law &lt;a href="https://books.google.co.in/books/about/International_Human_Rights_Law.html?id=YkcXAgAAQBAJ&amp;amp;redir_esc=y"&gt;recognizes that discrimination&lt;/a&gt; may take place in two possible ways, directly or indirectly. Direct  discrimination occurs when an individual is treated less favourably than  someone else similarly situated on one of the grounds prohibited in  international law, which, as per the &lt;a href="http://www.equalrightstrust.org/ertdocumentbank/Human%20Rights%20Committee,%20General%20Comment%2018.pdf"&gt;Human Rights Committee,&lt;/a&gt; includes race, colour, sex, language, religion, political or other  opinion, national or social origin, property, birth or other status.  Indirect discrimination occurs when a policy, rule or requirement is  ‘outwardly neutral’ but has a disproportionate impact on certain groups  that are meant to be protected by one of the prohibited grounds of  discrimination. A clear example of indirect discrimination recognized by  the European Court of Human Rights arose in the case of &lt;a href="http://www.errc.org/cikk.php?cikk=3559"&gt;&lt;i&gt;DH&amp;amp;Ors v Czech Republic&lt;/i&gt;&lt;/a&gt;.  The ECtHR struck down an apparently neutral set of statutory rules,  which implemented a set of tests designed to evaluate the intellectual  capability of children but which resulted in an excessively high  proportion of minority Roma children scoring poorly and consequently  being sent to special schools, possibly because the tests were blind to  cultural and linguistic differences. This case acts as a useful analogy  for the potential disparate impacts of AI and should serve as useful  precedent for future litigation against AI-driven solutions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Indirect discrimination by AI may occur &lt;a href="https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf"&gt;at two stages&lt;/a&gt;. First is the &lt;b&gt;usage of incomplete or inaccurate training data&lt;/b&gt; that results in the algorithm processing data that may not accurately reflect reality. Cathy O’Neil explains this &lt;a href="https://weaponsofmathdestructionbook.com/"&gt;using a simple example&lt;/a&gt;.  There are two types of crimes-those that are ‘reported’ and others that  are only ‘found’ if a policeman is patrolling the area. The first  category includes serious crimes such as murder or rape while the second  includes petty crimes such as vandalism or possession of illicit drugs  in small quantities. Increased police surveillance in areas in US cities  where Black or Hispanic people reside lead to more crimes being ‘found’  there. Thus, data is likely to suggest that these communities commit a  higher proportion of crimes than they actually do – indirect  discrimination that has been empirically been shown through research  published by &lt;a href="https://www.propublica.org/article/bias-in-criminal-risk-scores-is-mathematically-inevitable-researchers-say"&gt;Pro Publica&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Discrimination may also occur at the stage of &lt;b&gt;data processing&lt;/b&gt;, which is done through a metaphorical &lt;a href="https://www.sentient.ai/blog/understanding-black-box-artificial-intelligence/"&gt;‘black-box’&lt;/a&gt; that accepts inputs and generates outputs without revealing to the  human developer how the data was processed. This conundrum is compounded  by the fact that the algorithms are often utilised to solve an  amorphous problem-which attempts to break down a complex question into a  simple answer. An example is the development of ‘risk profiles’ of  individuals for the  &lt;a href="http://fortune.com/longform/ai-bias-problem/"&gt;determination of insurance premiums.&lt;/a&gt; Data might show that an accident is more likely to take place in inner  cities due  to more densely packed populations in these areas. Racial  and ethnic minorities tend to reside more in these areas, which means  that algorithms could learn that minorities are more likely to get into  accidents, thereby generating an outcome (‘risk profile’) that  indirectly discriminates on grounds of race or ethnicity.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It would be wrong to ignore discrimination, both direct and indirect,  that occurs as a result of human prejudice. The key difference between  that and discrimination by AI lies in the ability of other individuals  to compel the decision-maker to explain the factors that lead to the  outcome in question and testing its validity against principles of human  rights. The increasing amounts of discretion and, consequently, power  being delegated to autonomous systems mean that principles of  accountability which audit and check indirect discrimination need to be  built into the design of these systems. In the absence of these  principles, we risk surrendering core tenets of human rights law to the  whims of an algorithmically crafted reality.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence'&gt;https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Arindrajit Basu</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-26T14:47:57Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/deccan-herald-july-14-2019-rajmohan-sudhakar-deepfakes-algorithms-at-war-trust-at-stake">
    <title>Deepfakes: Algorithms at war, trust at stake</title>
    <link>https://cis-india.org/internet-governance/news/deccan-herald-july-14-2019-rajmohan-sudhakar-deepfakes-algorithms-at-war-trust-at-stake</link>
    <description>
        &lt;b&gt;A case in point is the video that surfaced of an Indian journalist not so long ago.&lt;/b&gt;
        &lt;p&gt;The article by Rajmohan Sudhakar was published in &lt;a class="external-link" href="https://www.deccanherald.com/metrolife/metrolife-on-the-move/deepfakes-algorithms-at-war-trust-at-stake-747042.html"&gt;Deccan Herald&lt;/a&gt; on July 14, 2019. Elonnai Hickok was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Now machines are learning to manipulate imagery. That is a real worry. Deepfakes for instance. They are AI-manipulated videos achieved by machine learning. Products of the humongous volume of images and videos now available online.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The danger is, this imagery could be yours or mine. Imagine artificial intelligence of neural networks creating convincing identities of our real counterparts, and starts posting videos. Absurd.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“Society has grappled with spurious and specious content in media over time. Media has been modified for various reasons, usually by those with access to significant resources and influence in the past,” says Elonnai Hickok, COO of the Bengaluru-based Centre for Internet and Society.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;From an AI and machine learning perspective, deepfakes could be understood by what is known as GAN -- generative adversarial networks, essentially two algorithms at war. One is a generator, the other a discriminator. They compete with each other based on set inputs, in time bettering the version they together help create. These are behind what are now known as deepfakes of popular figures floating around online. Barack Obama is seen saying in a purported deepfake, “stay woke bitches”, which of course he did not say.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another deepfake has Mark Zuckerberg boasting: “I have total control of billions of people’s stolen data, all their secrets, their lives, their futures.” “Deepfakes are media modified by current technology and techniques. Easy availability of technology and media allows anyone to create, tailor or manipulate media for their own ends. Deepfakes present an opportunity for introspection and research into the contours of freedom of expression as well as societal frameworks for dealing with fake content,” explains Hickok.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One of the horrid instances of a deepfake-like attack was the video that surfaced of an Indian woman journalist not so long ago. Or the child-kidnapping rumours that spread through WhatsApp and the subsequent mob lynchings. However, there’s the view that in post-truth times, deepfakes would be seen with caution in the inherent dilemma over believing what one views online.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“In India, people do not take these so seriously, especially on social media. It is mostly entertainment for many. Now, we are seeing people with diametrically opposing views. They often view content which they like to see. It would rather work as a reinforcer of views than a transformer,” feels political analyst Sandeep Shastri.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Open source software can create basic deepfakes if someone wanted to hurt somebody. The potential scale of danger and damage looms larger for influential figures and nations at war.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“While deep fakes can be used to damage societies, it is important that collectively society takes steps to become sensitised to ways that media can be used to manipulate opinions and choices, and allow people to develop skills that build awareness and context to what they see and believe,” adds Hickok.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A video emerged recently of an ‘Iranian’ boat near an attacked oil tanker in the Persian Gulf. Deepfake or not, the authenticity of the video was questionable. If used wily, it could have triggered a war.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;According to Hickok, society has to get more resilient to manipulation. “This includes spoken, written, seen as well as heard information. We have to learn to question the basis on which we confirm trust. Multiple forms of verification may help to address spurious media and information,” she says.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Deepfakes are no surprise as social media feed into the small and large divisions and differences of multitudes. Emergence of such potentially dangerous AIs isn’t taken quite seriously by the tech czars. In fact, it is a matter of economy for them.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Oscar Schwartz writes in The Guardian that ‘technological solutionism’ in the ‘attention economy’ may not be the real approach. “And herein lies the problem: by formulating deepfakes as a technological problem, we allow social media platforms to promote technological solutions to those problems – cleverly distracting the public from the idea that there may be more fundamental problems with powerful Silicon Valley tech platforms,” Schwartz warns.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“The measures do not fall on the regulators alone. I think, individuals (by introspection and building awareness), society (through education), the legal system (stringent evidentiary requirements and capacity building) industry (differentiating recreational and prejudicial content, tagging content that is manipulated, etc.) and regulators (enabling accountability, oversight, transparency and redress) can all contribute to a more resilient society,” observes Hickok.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In India, viewing a video is still considered close to truth, almost sacred by the vast majority. Necessarily, it would not require a technologically advanced deepfake, especially in the backward rural pockets, to rile up and aggravate biases and prejudices.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“Deepfakes can further existing biases and manipulate opinions and choices. They can disrupt trust inherent in societal groups to co-exist and politically, they can breed distrust in leadership and capability. That said, deepfakes can be used for humour and satire. Ultimately, the impact will be shaped by a number of factors including pre-existing biases, individual response, etc.,” Hickok elaborates.&lt;/p&gt;
&lt;p&gt;On a lighter note, deepfakes could be helpful too. We could very well do away with some of our television news presenters.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/deccan-herald-july-14-2019-rajmohan-sudhakar-deepfakes-algorithms-at-war-trust-at-stake'&gt;https://cis-india.org/internet-governance/news/deccan-herald-july-14-2019-rajmohan-sudhakar-deepfakes-algorithms-at-war-trust-at-stake&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Rajmohan Sudhakar</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-07-21T15:42:12Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/curating-genderlog-indias-twitter-handle">
    <title>Curating Genderlog India's Twitter handle</title>
    <link>https://cis-india.org/internet-governance/news/curating-genderlog-indias-twitter-handle</link>
    <description>
        &lt;b&gt;Shweta Mohandas has been nominated to curate Genderlog's Twitter handle (@genderlogindia).&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;Shweta Mohandas &lt;span&gt;will be tweeting about topics related to gender and data, more specifically around AI, big data, privacy and surveillance. To view the tweets, &lt;a class="external-link" href="https://twitter.com/genderlogindia/status/1127892055231873024"&gt;click here&lt;/a&gt;&lt;/span&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/curating-genderlog-indias-twitter-handle'&gt;https://cis-india.org/internet-governance/news/curating-genderlog-indias-twitter-handle&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Big Data</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2019-05-14T14:40:08Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age">
    <title>Confidentiality of Communications and Privacy of Data in the Digital Age</title>
    <link>https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age</link>
    <description>
        &lt;b&gt;On September 25, 2018, Elonnai Hickok participated in a side event Confidentiality of Communications and Privacy of Data in the Digital Age organized by INCLO and Privacy International at the Human Rights Council 39th ordinary session. Elonnai spoke on artificial intelligence and privacy.&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age'&gt;https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>praskrishna</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-28T06:02:07Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/comments-on-niti-aayog-working-document-towards-responsible-aiforall">
    <title> Comments on NITI AAYOG Working Document: Towards Responsible #AIforAll</title>
    <link>https://cis-india.org/internet-governance/blog/comments-on-niti-aayog-working-document-towards-responsible-aiforall</link>
    <description>
        &lt;b&gt;The NITI Aayog Working Document on Responsible AI for All released on 21st July 2020 serves as a significant statement of intent from NITI Aayog, acknowledging the need to ensure that any conception of “Responsible AI” must fulfill constitutional responsibilities, incorporated through workable principles. However, as it is a draft document for discussion, it is important to highlight next steps for research and policy levers to build upon this report.&lt;/b&gt;
        
&lt;div&gt;&amp;nbsp;&lt;/div&gt;
&lt;div&gt;Read our comments in their entirety &lt;a href="https://cis-india.org/internet-governance/comments-to-aiforall-pdf" class="internal-link" title="Comments to AIForAll pdf"&gt;here&lt;/a&gt;.&lt;/div&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/comments-on-niti-aayog-working-document-towards-responsible-aiforall'&gt;https://cis-india.org/internet-governance/blog/comments-on-niti-aayog-working-document-towards-responsible-aiforall&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Shweta Mohandas, Arindrajit Basu and Ambika Tandon</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>internet governance</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2020-08-18T06:25:18Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/raw/cisxscholars-harsh-gupta-machine-learning-for-lawyers-and-lawmakers-20170629">
    <title>CISxScholars Delhi - Harsh Gupta - FAT ML for Lawyers and Lawmakers (June 29, 5:30 pm)</title>
    <link>https://cis-india.org/raw/cisxscholars-harsh-gupta-machine-learning-for-lawyers-and-lawmakers-20170629</link>
    <description>
        &lt;b&gt;We are proud to announce that Harsh Gupta will discuss "FAT ML (Fairness, Accountability, and Transparency in Machine Learning) for Lawyers and Lawmakers" at the CIS office in Delhi on Thursday, June 29, at 5:30 pm. This will be a two and a half hour session: beginning with a 45 minute talk, followed by a 15 minute break, another talk for 45 minutes, and then a discussion session. Please RSVP if you are joining us: &lt;raw@cis-india.org&gt;. &lt;/b&gt;
        
&lt;p&gt;&amp;nbsp;&lt;/p&gt;
&lt;p&gt;&lt;em&gt;CISxScholars are informal events organised by CIS for presentation, discussion, and exchange of academic research and policy analysis.&lt;/em&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;h3&gt;&lt;strong&gt;FAT ML (Fairness, Accountability, and Transparency in Machine Learning) for Lawyers and Lawmakers&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;From tagging people in photos to determining risk of loan defaults, use of data-based tools is affecting more and more areas of our lives. In some areas there have been very successful applications of such tools; in other areas they have been found to not only reflect the existing bias and discrimination found in today's society but also exaggerate it.&lt;/p&gt;
&lt;h3&gt;&lt;strong&gt;Harsh Gupta&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;Harsh Gupta is a recent graduate from IIT Kharagpur with B.Sc and M.Sc in Mathematics and Computing and will be joining JP Morgan and Chase as a data scientist. He completed his master's thesis in "Discrimination Aware Machine Learning". He was also an intern at The Center for Internet and Society during summer of 2016.&lt;/p&gt;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/raw/cisxscholars-harsh-gupta-machine-learning-for-lawyers-and-lawmakers-20170629'&gt;https://cis-india.org/raw/cisxscholars-harsh-gupta-machine-learning-for-lawyers-and-lawmakers-20170629&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>sumandro</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>FAT ML</dc:subject>
    
    
        <dc:subject>CISxScholars</dc:subject>
    
    
        <dc:subject>Big Data</dc:subject>
    
    
        <dc:subject>Machine Learning</dc:subject>
    
    
        <dc:subject>Researchers at Work</dc:subject>
    
    
        <dc:subject>Event</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2017-06-27T09:16:48Z</dc:date>
   <dc:type>Event</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/cis-seminar-series">
    <title>CIS Seminar Series</title>
    <link>https://cis-india.org/internet-governance/blog/cis-seminar-series</link>
    <description>
        &lt;b&gt;The CIS seminar series will be a venue for researchers to share works-in-progress, exchange ideas, identify avenues for collaboration, and curate research. We also seek to mitigate the impact of Covid-19 on research exchange, and foster collaborations among researchers and academics from diverse geographies. Every quarter we will be hosting a remote seminar with presentations, discussions and debate on a thematic area. &lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The first seminar series was held on 7th and 8th October on the theme of &lt;a href="https://cis-india.org/internet-governance/blog/cis-seminar-series-information-disorder"&gt;‘Information Disorder: Mis-, Dis- and Malinformation’&lt;/a&gt;.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Theme for the Second Seminar (to be held online)&lt;/h3&gt;
&lt;h3&gt;Moderating Data, Moderating Lives:  Debating visions of (automated) content moderation in the contemporary&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Artificial Intelligence (AI) and Machine Learning (ML) based approaches have become increasingly popular as “solutions” to curb the extent of mis-, dis- mal-information, hate speech, online violence and harassment on social media. The pandemic and the ensuing work from home policy forced many platforms to shift to automated moderation which further highlighted the inefficacy of existing models (&lt;a href="https://www.zotero.org/google-docs/?u73Lwx"&gt;Gillespie, 2020)&lt;/a&gt; to deal with the surge in misinformation and harassment. These efforts, however, raise a range of interrelated concerns such as freedom and regulation of speech on the privately public sphere of social media platforms; algorithmic governance, censorship and surveillance; the relation between virality, hate, algorithmic design and profits; and social, political and cultural implications of ordering social relations through computational logics of AI/ML.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;On one hand, large-scale content moderation approaches (that include automated AI/ML-based approaches) have been deemed “necessary” given the enormity of data generated &lt;a href="https://www.zotero.org/google-docs/?JHQ0rF"&gt;(Gillespie, 2020)&lt;/a&gt;, on the other hand, they have been regarded as “technological fixtures” offered by the Silicon Valley &lt;a href="https://www.zotero.org/google-docs/?YLFnLm"&gt;(Morozov, 2013)&lt;/a&gt;, or “tyrannical” as they erode existing democratic measures &lt;a href="https://www.zotero.org/google-docs/?Ia8JYp"&gt;(Harari, 2018)&lt;/a&gt;. Alternatively, decolonial, feminist and postcolonial approaches insist on designing AI/ML models that centre voices of those excluded to sustain and further civic spaces on social media (&lt;a href="https://www.zotero.org/google-docs/?1Sa8vf"&gt;Siapera, 2022)&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;From the global south perspective, issues around content moderation foreground the hierarchies inbuilt in the existing knowledge infrastructures. First, platforms remain unwilling to moderate content in under-resourced languages of the global south citing technological difficulties. Second, given the scale and reach of social media platforms and inefficient moderation models, the work is outsourced to workers in the global south who are meant to do the dirty work of scavenging content off these platforms for the global north. Such concerns allow us to interrogate the techno-solutionist approaches as well as their critiques situated in the global north. These realities demand that we articulate a different relationship with AI/ML while also being critical of AI/ML as an instrument of social empowerment for those at the “bottom of the pyramid” &lt;a href="https://www.zotero.org/google-docs/?bvx6mV"&gt;(Arora, 2016)&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The seminar invites scholars interested in articulating nuanced responses to content moderation that take into account the harms perpetrated by algorithmic governance of social relations and irresponsible intermediaries while being cognizant of the harmful effects of mis-, dis- and mal-information, hate speech, online violence and harassment on social media.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;We invite abstract submissions that respond to these complexities vis-a-vis content moderation models or propose provocations regarding automated moderation models and their in/efficacy in furthering egalitarian relationships on social media, especially in the global south.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Submissions can reflect on the following themes using legal, policy, social, cultural and political approaches. Also, the list is not exhaustive and abstracts addressing other ancillary concerns are most welcome:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Metaphors of (content) moderation: mediating utopia, dystopia, scepticism surrounding AI/ML approaches to moderation.&lt;/li&gt;
&lt;li&gt;From toxic to healthy, from purity to impurity: Interrogating gendered, racist, colonial tropes used to legitimize content moderation &lt;/li&gt;
&lt;li&gt;Negotiating the link between content moderation, censorship and surveillance in the global south&lt;/li&gt;
&lt;li&gt;Whose values decide what is and is not harmful? &lt;/li&gt;
&lt;li&gt;Challenges of building moderation models for under-resourced languages.&lt;/li&gt;
&lt;li&gt;Content moderation, algorithmic governance and social relations. &lt;/li&gt;
&lt;li&gt;Communicating algorithmic governance on social media to the not so “tech-savvy” among us.&lt;/li&gt;
&lt;li&gt;Speculative horizons of content moderation and the future of social relations on the internet. &lt;/li&gt;
&lt;li&gt;Scavenging abuse on social media: Immaterial/invisible labour for making for-profit platforms safer to use.&lt;/li&gt;
&lt;li&gt;Do different platforms moderate differently? Interrogating content moderation on diverse social media platforms, and multimedia content.&lt;/li&gt;
&lt;li&gt;What should and should not be automated? Understanding prevalence of irony, sarcasm, humour, explicit language as counterspeech.&lt;/li&gt;
&lt;li&gt;Maybe we should not automate: Alternative, bottom-up approaches to content moderation&lt;/li&gt;
&lt;/ul&gt;
&lt;h3&gt;Seminar Format&lt;/h3&gt;
&lt;p&gt;We are happy to welcome abstracts for one of two tracks:&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Working paper presentation&lt;/strong&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A working paper presentation would ideally involve a working draft that is presented for about 15 minutes followed by feedback from workshop participants. Abstracts for this track should be 600-800 words in length with clear research questions, methodology, and questions for discussion at the seminar. Ideally, for this track, authors should be able to submit a draft paper two weeks before the conference for circulation to participants.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Coffee-shop conversations&lt;/strong&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In contrast to the formal paper presentation format, the point of the coffee-shop conversations is to enable an informal space for presentation and discussion of ideas. Simply put, it is an opportunity for researchers to “think out loud” and get feedback on future research agendas. Provocations for this should be 100-150 words containing a short description of the idea you want to discuss.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;We will try to accommodate as many abstracts as possible given time constraints. We welcome submissions from students and early career researchers, especially those from under-represented communities.&lt;/p&gt;
&lt;p&gt;All discussions will be private and conducted under the Chatham House Rule. Drafts will only be circulated among registered participants.&lt;/p&gt;
&lt;p&gt;Please send your abstracts to &lt;a href="mailto:workshops@cis-india.org"&gt;workshops@cis-india.org&lt;/a&gt;.&lt;/p&gt;
&lt;h3&gt;Timeline&lt;/h3&gt;
&lt;div id="_mcePaste"&gt;&lt;ol&gt;
&lt;li&gt;Abstract Submission Deadline: 18th April&lt;/li&gt;
&lt;li&gt;Results of Abstract review: 25th April&lt;/li&gt;
&lt;li&gt;Full submissions (of draft papers): 25th May&lt;/li&gt;
&lt;li&gt;Seminar date: Tentative 31st May&lt;/li&gt;
&lt;/ol&gt;&lt;/div&gt;
&lt;h3&gt;References&lt;/h3&gt;
&lt;p class="MsoNormal" style="text-align:justify; "&gt;&lt;span&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;Arora, P. (2016). Bottom of the Data Pyramid: Big Data and the Global South. &lt;/span&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;i&gt;&lt;span&gt;International Journal of Communication&lt;/span&gt;&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;, &lt;/span&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;i&gt;&lt;span&gt;10&lt;/span&gt;&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;(0), 19.&lt;/span&gt;&lt;/a&gt;&lt;/span&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p class="MsoNormal" style="text-align:justify; "&gt;&lt;span&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;Gillespie, T. (2020). Content moderation, AI, and the question of scale. &lt;/span&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;i&gt;&lt;span&gt;Big Data &amp;amp; Society&lt;/span&gt;&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;, &lt;/span&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;i&gt;&lt;span&gt;7&lt;/span&gt;&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;(2), 2053951720943234. https://doi.org/10.1177/2053951720943234&lt;/span&gt;&lt;/a&gt;&lt;/span&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p class="MsoNormal" style="text-align:justify; "&gt;&lt;span&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;Harari, Y. N. (2018, August 30). &lt;/span&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;i&gt;&lt;span&gt;Why Technology Favors Tyranny&lt;/span&gt;&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;. The Atlantic. https://www.theatlantic.com/magazine/archive/2018/10/yuval-noah-harari-technology-tyranny/568330/&lt;/span&gt;&lt;/a&gt;&lt;/span&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p class="MsoNormal" style="text-align:justify; "&gt;&lt;span&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt;Morozov, E. (2013). &lt;/span&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;i&gt;&lt;span&gt;To save everything, click here: The folly of technological solutionism&lt;/span&gt;&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g"&gt;&lt;span&gt; (First edition). PublicAffairs.&lt;/span&gt;&lt;/a&gt;&lt;/span&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g" style="text-align: justify; "&gt;Siapera, E. (2022). AI Content Moderation, Racism and (de)Coloniality. &lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g" style="text-align: justify; "&gt;&lt;i&gt;International Journal of Bullying Prevention&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g" style="text-align: justify; "&gt;, &lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g" style="text-align: justify; "&gt;&lt;i&gt;4&lt;/i&gt;&lt;/a&gt;&lt;a href="https://www.zotero.org/google-docs/?ZHb88g" style="text-align: justify; "&gt;(1), 55–65. https://doi.org/10.1007/s42380-021-00105-7&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/cis-seminar-series'&gt;https://cis-india.org/internet-governance/blog/cis-seminar-series&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Cheshta Arora</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Machine Learning</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Event</dc:subject>
    
    
        <dc:subject>Seminar</dc:subject>
    

   <dc:date>2022-04-11T15:19:11Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/big-data-in-india-benefits-harms-and-human-rights-a-report">
    <title>Big Data in India: Benefits, Harms, and Human Rights - Workshop Report</title>
    <link>https://cis-india.org/internet-governance/big-data-in-india-benefits-harms-and-human-rights-a-report</link>
    <description>
        &lt;b&gt;The Centre for Internet and Society held a one-day workshop on “Big Data in India: Benefits, Harms and Human Rights” at India Habitat Centre, New Delhi on the 1st of October, 2016.  This report is a compilation of the issues discussed, ideas exchanged and challenges recognized during the workshop. The objective of the workshop was to discuss aspects of big data technologies in terms of harms, opportunities and human rights. The discussion was designed around an extensive study of current and potential future uses of big data for governance in India, that CIS has undertaken over the last year with support from the MacArthur Foundation.&lt;/b&gt;
        
&lt;p&gt;&amp;nbsp;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Contents&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#1"&gt;&lt;strong&gt;Big Data: Definitions and Global South Perspectives&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#2"&gt;&lt;strong&gt;Aadhaar as Big Data&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#3"&gt;&lt;strong&gt;Seeding&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#4"&gt;&lt;strong&gt;Aadhaar and Data Security&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#5"&gt;&lt;strong&gt;Aadhaar’s Relational Arrangement with Big Data Scheme&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#6"&gt;&lt;strong&gt;The Myths surrounding Aadhaar&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#7"&gt;&lt;strong&gt;IndiaStack and FinTech Apps&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href="#8"&gt;&lt;strong&gt;Problems with UID&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;h2 id="1"&gt;Big Data: Definitions and Global South Perspectives&lt;/h2&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;“Big Data” has been defined by multiple scholars till date. The first consideration at the workshop was to discuss various definitions of big data, and also to understand what could be considered Big Data in terms of governance, especially in the absence of academic consensus. One of the most basic ways to define it, as given by the National Institute of Standards and Technology, USA, is to take it to be the data that is beyond the computational capacity of current systems. This definition has been accepted by the UIDAI of India. Another participant pointed out that Big Data is not only indicative of size, but rather the nature of data which is unstructured, and continuously flowing. The Gartner definition of Big Data relies on the three Vs i.e. Volume (size), Velocity (infinite number of ways in which data is being continuously collected) and Variety (the number of ways in which data can be collected in rows and columns).&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The presentation also looked at ways in which Big Data is different from traditional data. It was pointed out that it can accommodate diverse unstructured datasets, and it is ‘relational’ i.e. it needs the presence of common field(s) across datasets which allows these fields to be conjoined. For e.g., the UID in India is being linked to many different datasets, and they don’t constitute Big Data separately, but do so together. An increasingly popular definition is to define data as “Big Data” based on what can be achieved through it. It has been described by authors as the ability to harness new kinds of insight which can inform decision making. It was pointed out that CIS does not subscribe to any particular definition, and is still in the process of coming up with a comprehensive definition of Big Data.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Further, discussion touched upon the approach to Big Data in the Global South. It was pointed out that most discussions about Big Data in the Global South are about the kind of value that it can have, the ways in which it can change our society. The Global North, on the other hand, &amp;nbsp;has moved on to discussing the ethics and privacy issues associated with Big Data.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;After this, the presentation focussed on case studies surrounding key Central Government initiatives and projects like Aadhaar, Predictive Policing, and Financial Technology (FinTech).&lt;/p&gt;
&lt;h2 id="2"&gt;Aadhaar as Big Data&lt;/h2&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;In presenting CIS’ case study on Aadhaar, it was pointed out that initially, Aadhaar, with its enrollment dataset was by itself being seen as Big Data. However, upon careful consideration in light of definitions discussed above, it can be seen as something that enables Big Data. The different e-governance projects within Digital India, along with Aadhaar, constitute Big Data. The case study discussed the Big Data implications of Aadhaar, and in particular looked at a ‘cradle to grave’ identity mapping through various e-government projects and the datafication of various transaction generated data.&lt;/p&gt;
&lt;h2 id="3"&gt;Seeding&lt;/h2&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Any digital identity like Aadhaar typically has three features: 1. Identification i.e. a number or card used to identify yourself; 2. Authentication, which is based on your number or card and any other digital attributes that you might have; 3. Authorisation: As bearers of the digital identity, we can authorise the service providers to take some steps on our behalf. The case study discussed ‘seeding’ which enables the Big Data aspects of Digital India. In the process of seeding, different government databases can be seeded with the UID number using a platform called Ginger. Due to this, other databases can be connected to UIDAI, and through it, data from other databases can be queried by using your Aadhaar identity itself. This is an example of relationality, where fractured data is being brought together. At the moment, it is not clear whether this access by UIDAI means that an actual physical copy of such data from various sources will be transferred to UIDAI’s servers or if they will &amp;nbsp;just access it through internet, but the data remains on the host government agency’s server. An example of even private parties becoming a part of this infrastructure was raised by a participant when it was pointed out that Reliance Jio is now asking for fingerprints. This can then be connected to the relational infrastructure being created by UIDAI. The discussion then focused on how such a structure will function, where it was mentioned that as of now, it cannot be said with certainty that UIDAI will be the agency managing this relational infrastructure in the long run, even though it is the one building it.&lt;/p&gt;
&lt;h2 id="4"&gt;Aadhaar and Data Security&lt;/h2&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;This case study also dealt with the sheer lack of data protection legislation in India except for S.43A of the IT Act. The section does not provide adequate protection as the constitutionality of the rules and regulations under S.43A is ambivalent. More importantly, it only refers to private bodies. Hence, any seeding which is being done by the government is outside the scope of data protection legislation. Thus, at the moment, no legal framework covers the processes and the structures being used for datasets. Due to the inapplicability of S.43A to public bodies, questions were raised as to the existence of a comprehensive data protection policy for government institutions. Participants answered the question in the negative. They pointed out that if any government department starts collecting data, they develop their own privacy policy. There are no set guidelines for such policies and they do not address concerns related to consent, data minimisation and purpose limitation at all. Questions were also raised about the access and control over Big Data with government institutions. A tentative answer from a participant was that such data will remain under the control of &amp;nbsp;the domain specific government ministry or department, for e.g. MNREGA data with the Ministry of Rural Development, because the focus is not on data centralisation but rather on data linking. As long as such fractured data is linked and there is an agency that is responsible to link them, this data can be brought together. Such data is primarily for government agencies. But the government is opening up certain aspects of the data present with it for public consumption for research and entrepreneurial purposes.The UIDAI provides you access to your own data after paying a minimal fee. The procedure for such access is still developing.&lt;/p&gt;
&lt;h2 id="5"&gt;Aadhaar’s Relational Arrangement with Big Data Scheme&lt;/h2&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The various Digital India schemes brought in by the government were elucidated during the workshop. It was pointed out that these schemes extend to myriad aspects of a citizen’s daily life and cover all the essential public services like health, education etc. This makes Aadhaar imperative even though the Supreme Court has observed that it is not mandatory for every citizen to have a unique identity number. The benefits of such identity mapping and the ecosystem being generated by it was also enumerated during the discourse. But the complete absence of any data ethics or data confidentiality principles make us unaware of the costs at which these benefits are being conferred on us. Apart from surveillance concerns, the knowledge gap being created between the citizens and the government was also flagged. Three main benefits touted to be provided by Aadhaar were then analysed. The first is the efficient delivery of services. This appears to be an overblown claim as the Aadhaar specific digitisation and automation does not affect the way in which employment will be provided to citizens through MNREGA or how wage payment delays will be overcome. These are administrative problems that Aadhaar and associated technologies cannot solve. The second is convenience to the citizens. The fallacies in this assertion were also brought out and identified. Before the Aadhaar scheme was rolled in, ration cards were issued based on certain exclusion and inclusion criteria.. The exclusion and inclusion criteria remain the same while another hurdle in the form of Aadhaar has been created. As India is still lacking in supporting infrastructure such as electricity, server connectivity among other things, Aadhaar is acting as a barrier rather than making it convenient for citizens to enroll in such schemes.The third benefit is fraud management. 
Here, a participant pointed out that this benefit was due to digitisation in the form of GPS chips in food delivery trucks and electronic payment and not the relational nature of Aadhaar. Aadhaar is only concerned with the linking up or relational part. About deduplication, it was pointed out how various government agencies have tackled it quite successfully by using technology different from biometrics which is unreliable at the best of times.&lt;/p&gt;
&lt;h2 id="6"&gt;The Myths surrounding Aadhaar&lt;/h2&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The discussion also reflected on the fact that &amp;nbsp;Aadhaar is often considered to be a panacea that subsumes all kinds of technologies to tackle leakages. However, this does not take into account the fact that leakages happen in many ways. A system should have been built to tackle those specific kinds of leakages, but the focus is solely on Aadhaar as the cure for all. Notably, participants &amp;nbsp;who have been a part of the government pointed out how this myth is misleading and should instead be seen as the first step towards a more digitally enhanced country which is combining different technologies through one medium.&lt;/p&gt;
&lt;h2 id="7"&gt;IndiaStack and FinTech Apps&lt;/h2&gt;
&lt;h3 id="71"&gt;What is India Stack?&lt;/h3&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The focus then shifted to another extremely important Big Data project, India Stack, being conceptualised and developed &amp;nbsp;by a team of private developers called iStack, for the NPCI. It builds on the UID project, Jan Dhan Yojana and mobile services trinity to propagate and develop a cashless, presence-less, paperless and granular consent layer based on UID infrastructure to digitise India.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;A participant pointed out that the idea of India Stack is to use UID as a platform and keep stacking things on it, such that more and more applications are developed. This in turn will help us to move from being a ‘data poor’ country to a ‘data rich’ one. The economic benefits of this data though as evidenced from the TAGUP report - a report about the creation of National Information Utilities to manage the data that is present with the government - is for the corporations and not the common man. The TAGUP report openly talks about privatisation of data.&lt;/p&gt;
&lt;h3 id="72"&gt;Problems with India Stack&lt;/h3&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The granular consent layer of India Stack hasn’t been developed yet but they have proposed to base it on MIT Media Lab’s OpenPDS system. The idea being that, on the basis of the choices made by the concerned person, access to a person’s personal information may be granted to an agency like a bank. What is more revolutionary is that India Stack might even revoke this access if the concerned person expresses a wish to do so or the surrounding circumstances signal to India Stack that it will be prudent to do so. It should be pointed out that the technology required for OpenPDS is extremely complex and is not available in India. Moreover, it’s not clear how this system would work. Apart from this, even the paperless layer has its faults and has been criticised by many since its inception, because an actual government signed and stamped paper has been the basis of a claim. In the paperless system, you are provided a Digilocker in which all your papers are stored electronically, on the basis of your UID number. However, it was brought to light that this doesn’t take into account those who either do not want a Digilocker or UID number or cases where they do not have access to their digital records. How in such cases will people make claims?&lt;/p&gt;
&lt;h3 id="73"&gt;A Digital Post-Dated Cheque: It’s Ramifications&lt;/h3&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;A key change that FinTech apps and the surrounding ecosystem want to make is to create a digital post-dated cheque so as to allow individuals to get loans from their mobiles especially in remote areas. This will potentially cut out the need to construct new banks, thus reducing the capital expenditure , while at the same time allowing the credit services to grow. The direct transfer of money between UID numbers without the involvement of banks is a step to further help this ecosystem grow. Once an individual consents to such a system, however, automatic transfer of money from one’s bank accounts will be affected, regardless of the reason for payment. This is different from auto debt deductions done by banks presently, as in the present system banks have other forms of collateral as well. The automatic deduction now is only affected if these other forms are defaulted upon. There is no knowledge as to whether this consent will be reversible or irreversible. As Jan Dhan Yojana accounts are zero balance accounts, the account holder will be bled dry. The implication of schemes such as “Loan in under 8 minutes” were also discussed. The advantage of such schemes is that transaction costs are reduced.The financial institution can thus grant loans for the minimum amount without any additional enquiries. It was pointed out that this new system is based on living on future income much like the US housing bubble crash. Interestingly, in Public Distribution Systems, biometrics are insisted upon even though it disrupts the system. This can be seen as a part of the larger infrastructure to ensure that digital post-dated cheques become a success.&lt;/p&gt;
&lt;h3 id="74"&gt;The Role of FinTech Apps&lt;/h3&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;FinTech ‘apps’ are being presented with the aim of propagating financial inclusion. The Technology Advisory Group for Unique Projects report stated that as managing such information sources is a big task, just like electricity utilities, a National Information Utilities (NIU) should be set up for data sources. These NIUs as per the report will follow a fee based model where they will be charging for their services for government schemes. The report identified two key NIUs namely the National Payments Corporation of India (NPCI) and the Goods and Services Tax Network (GSTN). The key usage that FinTech applications will serve is credit scoring. The traditional credit scoring data sources only comprised a thin file of records for an individual, but the data that FinTech apps collect - &amp;nbsp;a person’s UID number, mobile number. and bank account number all linked up, allow for a far &amp;nbsp;more comprehensive credit rating. Government departments are willing to share this data with FinTech apps as they are getting analysis in return. Thus, by using UID and the varied data sources that have been linked together by UID, a ‘thick file’ is now being created by FinTech apps. Banking apps have not yet gone down the route of FinTech apps to utilise Big Data for credit scoring purposes.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt; &amp;nbsp;&amp;nbsp;&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The two main problems with such apps is that there is no uniform way of credit scoring. This distorts the rate at which a person has to pay interest. The consent layer adds another layer of complication as refusal to share mobile data with a FinTech app may lead to the app declaring one to be a risky investment thus, subjecting that individual to a &amp;nbsp;higher rate of interest .&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;h3 id="75"&gt;Regulation of FinTech Apps and the UID Infrastructure&lt;/h3&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt; India Stack and the applications that are being built on it, generate a lot of transaction metadata that is very intimate in nature. The privacy aspects of the UID legislation doesn't cover such data. The granular consent layer which has been touted to cover this still has to come into existence. Also, Big Data is based on sharing and linking of data. Here, privacy concerns and Big Data objectives clash. Big Data by its very nature challenges privacy principles like data minimisation and purpose limitation.The need for regulation to cover the various new apps and infrastructure which are being developed was pointed out.&lt;/p&gt;
&lt;h2 id="8"&gt;Problems with UID&lt;/h2&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;It has been observed that any problem present with Aadhaar is usually labelled as a teething problem, it’s claimed that it will be solved in the next 10 years. But, this begs the question - why is the system online right now?&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Aadhaar is essentially a new data condition and a new exclusion or inclusion criteria. Data exclusion modalities as observed in Rajasthan after the introduction of biometric Point of Service (POS) machines at ration shops was found to be 45% of the population availing PDS services. This number also includes those who were excluded from the database by being included in the wrong dataset. There is no information present to tell us how many actual duplicates and how many genuine ration card holders were weeded out/excluded by POS.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;It was also mentioned that any attempt to question Aadhaar is considered to be an attempt to go back to the manual system and this binary thinking needs to change. Big Data has the potential to benefit people, as has been evidenced by the scholarship and pension portals. However, Big Data’s problems arise in systems like PDS, where there is centralised exclusion at the level of the cloud. Moreover, the quantity problem present in the PDS and MNREGA systems persists. There is still the possibility of getting lesser grains and salary even with analysis of biometrics, hence proving that there are better technologies to tackle these problems. Presently, the accountability mechanisms are being weakened as the poor don’t know where to go to for redressal. Moreover, the mechanisms to check whether the people excluded are duplicates or not is not there. At the time of UID enrollment, out of 90 crores, 9 crore were rejected. There was no feedback or follow-up mechanism to figure out why are people being rejected. It was just assumed that they might have been duplicates.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Another problem is the rolling out of software without checking for inefficiencies or problems at a beta testing phase. The control of developers over this software, is so massive that it can be changed so easily without any accountability.. The decision making components of the software are all proprietary like in the the de-duplication algorithm being used by the UIDAI. Thus, this leads to a loss of accountability because the system itself is in flux, none of it is present in public domain and there are no means to analyse it in a transparent fashion..&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;These schemes are also being pushed through due to database politics. On a field study of NPR of citizens, another Big Data scheme, it was found that you are assumed to be an alien if you did not have the documents to prove that you are a citizen. Hence, unless you fulfill certain conditions of a database, you are excluded and are not eligible for the benefits that being on the database afford you.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Why is the private sector pushing for UIDAI and the surrounding ecosystem?&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Financial institutions stand to gain from encouraging the UID as it encourages the credit culture and reduces transaction costs.. Another advantage for the private sector is perhaps the more obvious one, that is allows for efficient marketing of products and services..&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The above mentioned fears and challenges were actually observed on the ground and the same was shown through the medium of a case study in West Bengal on the smart meters being installed there by the state electricity utility. While the data coming in from these smart meters is being used to ensure that a more efficient system is developed,it is also being used as a surrogate for income mapping on the basis of electricity bills being paid. This helps companies profile neighbourhoods. The technical officer who first receives that data has complete control over it and he can easily misuse the data. This case study again shows that instruments like Aadhaar and India Stack are limited in their application and aren’t the panacea that they are portrayed to be.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;A participant &amp;nbsp;pointed out that in the light of the above discussions, the aim appears to be to get all kinds of data, through any source, and once you have gotten the UID, you link all of this data to the UID number, and then use it in all the corporate schemes that are being started. Most of the problems associated with Big Data are being described as teething problems. The India Stack and FinTech scheme is coming in when we already know about the problems being faced by UID. The same problems will be faced by India Stack as well.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Can you opt out of the Aadhaar system and the surrounding ecosystem?&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The discussion then turned towards whether there can be voluntary opting out from Aadhaar. It was pointed out that the government has stated that you cannot opt out of Aadhaar. Further, the privacy principles in the UIDAI bill are ambiguously worded where individuals &amp;nbsp;only have recourse for basic things like correction of your personal information. The enforcement mechanism present in the UIDAI Act is also severely deficient. There is no notification procedure if a data breach occurs. . The appellate body ‘Cyber Appellate Tribunal’ has not been set up in three years.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;CCTNS: Big Data and its Predictive Uses&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;What is Predictive Policing?&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The next big Big Data case study was on the &amp;nbsp;Crime and Criminal Tracking Network &amp;amp; Systems (CCTNS). Originally it was supposed to be a digitisation and interconnection scheme where police records would be digitised and police stations across the length and breadth of the country would be interconnected. But, in the last few years some police departments of states like Chandigarh, Delhi and Jharkhand have mooted the idea of moving on to predictive policing techniques. It envisages the use of existing statistical and actuarial techniques along with many other tropes of data to do so. It works in four ways: 1. By predicting the place and time where crimes might occur; 2. To predict potential future offenders; 3. To create profiles of past crimes in order to predict future crimes; 4. Predicting groups of individuals who are likely to be victims of future crimes.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;How is Predictive Policing done?&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;To achieve this, the following process is followed: 1. Data collection from various sources which includes structured data like FIRs and unstructured data like call detail records, neighbourhood data, crime seasonal patterns etc. 2. Analysis by using theories like the near repeat theory, regression models on the basis of risk factors etc. 3. Intervention&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Flaws in Predictive Policing and questions of bias&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;An obvious weak point in the system is that if the initial data going into the system is wrong or biased, the analysis will also be wrong. Efforts are being made to detect such biases. An important way to do so will be by building data collection practices into the system that protect its accuracy. The historical data being entered into the system is carrying on the prejudices inherited from the British Raj and biases based on religion, caste, socio-economic background etc.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;One participant brought about the issue of data digitization in police stations, and the impact of this haphazard, unreliable data on a Big Data system. This coupled with paucity of data is bound to lead to arbitrary results. An effective example was that of black neighbourhoods in the USA. These are considered problematic and thus they are policed more, leading to a higher crime rate as they are arrested for doing things that white people in an affluent neighbourhood get away with. This in turn further perpetuates the crime rate and it becomes a self-fulfilling prophecy. In India, such a phenomenon might easily develop in the case of migrants, de-notified tribes, Muslims etc. &amp;nbsp;A counter-view on bias and discrimination was offered here. One participant pointed out that problems with haphazard or poor quality of data is not a colossal issue as private companies are willing to fill this void and are actually doing so in exchange for access to this raw data. It was also pointed out how bias by itself is being used as an all encompassing term. There are multiplicities of biases and while analysing the data, care should be taken to keep it in mind that one person’s bias and analysis might and usually does differ from another. Even after a computer has analysed the data, the data still falls into human hands for implementation.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The issue of such databases being used to target particular communities on the basis of religion, race, caste, ethnicity among other parameters was raised. Questions about control and analysis of data were also discussed, i.e. whether it will be top-down with data analysis being done in state capitals or will this analysis be done at village and thana levels as well too. It was discussed as topointed out how this could play a major role in the success and possible persecutory treatment of citizens, as the policemen at both these levels will have different perceptions of what the data is saying. . It was further pointed out, that at the moment, there’s no clarity on the mode of implementation of Big Data policing systems. Police in the USA have been seen to rely on Big Data so much that they have been seen to become ‘data myopic’. For those who are on the bad side of Big Data, in the Indian context, laws like preventive detention can be heavily misused.There’s a very high chance that predictive policing due to the inherent biases in the system and the prejudices and inefficiency of the legal system will further suppress the already targeted sections of the society. A counterpoint was raised and it was suggested that contrary to our fears, CCTNS might lead to changes in our understanding and help us to overcome longstanding biases.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Open Knowledge Architecture as a solution to Big Data biases?&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The conference then mulled over the use of ‘Open Knowledge’ architecture to see whether it can provide the solution to rid Big Data of its biases and inaccuracies if enough eyes are there. It was pointed out that Open Knowledge itself can’t provide foolproof protection against these biases as the people who make up the eyes themselves are predominantly male belonging to the affluent sections of the society and they themselves suffer from these biases.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Who exactly is Big Data supposed to serve?&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The discussion also looked at questions such as who is this data for? Janata Information System (JIS), is a concept developed by MKSS &amp;nbsp;where the data collected and generated by the government is taken to be for the common citizens. For e.g. MNREGA data should be used to serve the purposes of the labourers. The raw data as is available at the moment, usually cannot be used by the common man as it is so vast and full of information that is not useful for them at all. It was pointed out that while using Big Data for policy planning purposes, the actual string of information that turned out to be needed was very little but the task of unravelling this data for civil society purposes is humongous. By presenting the data in the right manner, the individual can be empowered. The importance of data presentation was also flagged. It was agreed upon that the content of the data should be for the labourer and not a MNC, as the MNC has the capability to utilise the raw data on it’s own regardless.&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Concerns about Big Data usage&lt;/p&gt;
&lt;ol&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Participants pointed out that &amp;nbsp;privacy concerns are usually brushed under the table due to a belief that the law is sufficient or that the privacy battle has already been lost. &amp;nbsp;&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;In the absence of knowledge of domain and context, Big Data analysis is quite limited. Big Data’s accuracy and potential to solve problems needs to be factually backed.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The narrative of Big Data often rests on the assumption that descriptive statistics take over inferential statistics, thus eliminating the need for domain specific knowledge. It is claimed that the data is so big that it will describe everything that we need to know.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Big Data is creating a shift from a deductive model of scientific rigour to an inductive one. In response to this, a participant offered the idea that troves of good data allow us to make informed questions on the basis of which the deductive model will be formed. A hybrid approach combining both deductive and inductive might serve us best.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The need to collect the right data in the correct format, in the right place was also expressed.&lt;/p&gt;
&lt;/li&gt;&lt;/ol&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Potential Research Questions &amp;amp; Participants’ Areas of Research&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Following this discussion, participants brainstormed to come up with potential areas of research and research questions. They have been captured below:&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Big Data, Aadhaar and India Stack:&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;ol&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Has Aadhaar been able to tackle illegal ways of claiming services or are local negotiations and other methods still prevalent?&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Is the consent layer of India Stack being developed in a way that provides an opportunity to the UID user to give informed consent? The OpenPDS and its counterpart in the EU i.e. the My Data Structure were designed for countries with strong privacy laws. Importantly, they were meant for information shared on social media and not for an individual’s health or credit history. India is using it in a completely different sphere without strong data protection laws. What were the granular consent layer structures present in the West designed for and what were they supposed to protect?&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The question of ownership of data needs to be studied especially in context of &amp;nbsp;a globalised world where MNCs are collecting copious amounts of data of Indian citizens. What is the interaction of private parties in this regard?&lt;/p&gt;
&lt;/li&gt;&lt;/ol&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Big Data and Predictive Policing:&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;ol&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;How are inequalities being created through the Big Data systems? Lessons should be taken from the Western experience with the advent of predictive policing and other big data techniques - they tend to lead to perpetuation of the current biases which are already ingrained in the system.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;It was also pointed out how while studying these topics and anything related to technology generally, we become aware of a divide that is present between the computational sciences and social sciences. This divide needs to be erased if Big Data or any kind of data is to be used efficiently. There should be a cross-pollination between different groups of academics. An example of this can be seen to be the ‘computational social sciences departments’ that have been coming up in the last 3-4 years.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Why are so many interim promises made by Big Data failing? A study of this phenomenon needs to be done from a social science perspective. This will allow one to look at it from a different angle.&lt;/p&gt;
&lt;/li&gt;&lt;/ol&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Studying Big Data:&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;ol&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;What is the historical context of the terms of reference being used for Big Data? The current Big Data debate in India is based on parameters set by the West. For better understanding of Big Data, it was suggested that P.C. Mahalanobis’ experience while conducting the Indian census, (which was the Big Data of that time) can be looked at to get a historical perspective on Big Data. This comparison might allow us to discover questions that are important in the Indian context. It was also suggested that rather than using ‘Big Data’ as a catchphrase &amp;nbsp;to describe these new technological innovations, we need to be more discerning.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;What are the ideological aspects that must be considered while studying Big Data? What does the dialectical promise of technology mean? It was contended that every time there is a shift in technology, the zeitgeist of that period is extremely excited and there are claims that it will solve everything. There’s a need to study this dialectical promise and the social promise surrounding it.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Apart from the legitimate fears that Big Data might lead to exclusion, what are the possibilities in which it improve inclusion too?&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;The diminishing barrier between the public and private self, which is a tangent to the larger public-private debate was mentioned.&lt;/p&gt;
&lt;/li&gt;&lt;li style="list-style-type: decimal;" dir="ltr"&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;How does one distinguish between technology failure and process failure while studying Big Data? &amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/p&gt;
&lt;/li&gt;&lt;/ol&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Big Data: A Friend?&lt;/p&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;In the concluding session, the fact that the Big Data moment cannot be wished away was acknowledged. The use of analytics and predictive modelling by the private sector is now commonplace and India has made a move towards a database state through UID and Digital India. The need for a nuanced debate, that does away with the false equivalence of being either a Big Data enthusiast or a luddite is crucial.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;A participant offered two approaches to solving a Big Data problem. The first was the Big Data due process framework which states that if a decision has been taken that impacts the rights of a citizen, it needs to be cross examined. The efficacy and practicality of such an approach is still not clear. The second, slightly paternalistic in nature, was the approach where Big Data problems would be solved at the data science level itself. This is much like the affirmative algorithmic approach which says that if in a particular dataset, the data for the minority community is not available then it should be artificially introduced in the dataset. It was also &amp;nbsp;suggested that carefully calibrated free market competition can be used to regulate Big Data. For e.g. a private personal wallet company that charges higher, but does not share your data at all can be an example of such competition. &amp;nbsp;&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;Another important observation was the need to understand Big Data in a Global South context and account for unique challenges that arise. While the convenience of Big Data is promising, its actual manifestation depends on externalities like connectivity, accurate and adequate data etc that must be studied in the Global South.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p style="text-align: justify;" dir="ltr"&gt;While the promises of Big Data are encouraging, it is also important to examine its impacts and its interaction with people's rights. Regulatory solutions to mitigate the harms of big data while also reaping its benefits need to evolve.&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;
&lt;p&gt;&lt;span id="docs-internal-guid-90fa226f-6157-27d9-30cd-050bdc280875"&gt;&lt;/span&gt;&lt;/p&gt;
&lt;div style="text-align: justify;" dir="ltr"&gt;&amp;nbsp;&lt;/div&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/big-data-in-india-benefits-harms-and-human-rights-a-report'&gt;https://cis-india.org/internet-governance/big-data-in-india-benefits-harms-and-human-rights-a-report&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Vidushi Marda, Akash Deep Singh and Geethanjali Jujjavarapu</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Human Rights</dc:subject>
    
    
        <dc:subject>UID</dc:subject>
    
    
        <dc:subject>Big Data</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Machine Learning</dc:subject>
    
    
        <dc:subject>Featured</dc:subject>
    
    
        <dc:subject>Digital India</dc:subject>
    
    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Information Technology</dc:subject>
    
    
        <dc:subject>E-Governance</dc:subject>
    

   <dc:date>2016-11-18T12:58:19Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/economic-times-anjali-venugopalan-june-4-2019-banking-on-artificial-intelligence">
    <title>Banking on artificial intelligence: In hiring drive, Bots are calling the shots now </title>
    <link>https://cis-india.org/internet-governance/news/economic-times-anjali-venugopalan-june-4-2019-banking-on-artificial-intelligence</link>
    <description>
        &lt;b&gt;Algorithms analyse expressions, tone to check for traits such as confidence, anger in video interviews. &lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Anjali Venugopalan was &lt;a class="external-link" href="https://economictimes.indiatimes.com/jobs/banking-on-artificial-intelligence-in-hiring-drive-bots-are-calling-the-shots-now/articleshow/69641832.cms"&gt;published in Economic Times &lt;/a&gt;on June 4, 2019, Sunil Abraham was quoted. Also mirrored on &lt;a class="external-link" href="https://tech.economictimes.indiatimes.com/news/technology/in-hiring-drive-bots-are-calling-the-shots-now/69641830"&gt;ET Tech.com&lt;/a&gt;.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;The future of hiring is already upon us. Algorithms are analysing people’s expressions and tone of voice to check for traits such as “confidence” and “happiness” during video interviews. The robotic video assessment software is then used to hire candidates — customer service operators and assistant vice presidents alike — though the process comes with its own set of problems.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Axis Bank used algorithm-based video interviews — along with aptitude tests — to hire around 2,000 customer service officers from a pool of more than 40,000 applicants this year, said Rajkamal Vempati, HR head of the private sector bank, adding it could standardise and scale up the process of hiring.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;HR managers only gave offer letters, he said.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Nirmal Singh, CEO of Wheebox, a division of PeopleStrong which carried out the hiring, said it trained the face-indexing software — sourced from Microsoft — using around 50,000 candidates who had applied to Axis Bank in 2017. The software picked up emotional states such as “nervousness” and “happiness” based on eye movements, expressions and tone of voice and marked the candidates, Singh said. Scores from candidates who were shortlisted were used to come up with the “cutoff ” for these traits. Nirmal Singh, CEO of Wheebox, a division of PeopleStrong which carried out the hiring, said it trained the face-indexing software — sourced from Microsoft — using around50,000 candidates who had applied to Axis Bank in 2017. The software picked up emotional states such as “nervousness” and “happiness” based on eye movements,expressions and tone of voice and marked the candidates, Singh said. Scores from candidates who were shortlisted were used to come up with the “cutoff ” for these traits.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Insurance provider Bajaj Allianz has hired more than 1,600 people, including underwriters and assistant vice presidents, with the help of robotic video assessments that analysed &lt;span&gt;behaviour, said Vikramjeet Singh, chief HR officer, adding it could help reduce human bias. &lt;/span&gt;&lt;span&gt;Insurance provider Bajaj Allianz has hired more than 1,600 people, including underwriters and assistant vice presidents, with the help of robotic video assessments that analysedbehaviour, said Vikramjeet Singh, chief HR officer, adding it could help reduce human bias.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Concerns over Software's Biases&lt;/span&gt;&lt;/h3&gt;
&lt;p&gt;&lt;span&gt;Talview, a Palo Alto-headquartered company with operations in Singapore and the United States, provided the assessment for the insurer. &lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The software, sourced from Microsoft and IBM, can analyse states such as “anger” and “happiness” from expressions, “confidence” from voice tone and traits like “ability to work ina team” and “decisiveness” from text analysis, according to Rajeev Menon, chief product officer, Talview.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Candidates may be able to beat questionnaires by giving expected answers to questions like “Can you work in a team?”, but video assessments pick up on subtleties in expression and vocabulary, and cannot be gamed, Menon said.Be that as it may, Amazon.com scrapped its artificial intelligence-based recruiting system after it found the AI system biased against women, according to an October 2018 report by Reuters.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The AI system was drawing on data from the past, where more men had made it into the company than women.“If you can fool a human, you can fool a computer,” said Sunil Abraham, executive director of Centre for Internet and Society.Recruitment algorithms could “homogenise the emotional economy” by forcing people to act a certain way, he said.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Since the software is based on expressions and tone of voice, it could disadvantage less expressive people, like those who are autistic, said Wheebox’s Singh.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Facial recognition by companies such as IBM, Microsoft and Amazon got the gender of a dark-skinned woman wrong one out of three times (20-35% error rate), a 2018 study by MIT researcher Joy Buolamwini found. For white males, the error was 0.8%.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Video Assessments&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Facial recognition has nothing to do with video analytics, Wheebox’s Singh said. The two are, however, closely linked, said Animashree Anandkumar, professor of computing and mathematical science at California Institute of Technology.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;She said such software was “deeply problematic”, as it could correlate wrong factors (like gender or skin colour) and show that as the cause for success. It is possible dark-skinned people would be disadvantaged, said Menon of Talview. The company uses facial expression as just one input among many and gives it a low weightage, he said. The software they use is only 39% accurate, and will improve with more data, said Ridhima Gauba, co-founder of Interview Air, a Navi Mumbai-based company that provides a similar service to companies and colleges.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Companies also say video assessments are a risky business.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Bajaj Allianz does not use video assessments for recruitments beyond middle management.  It is “important to see a person physically” when hiring for senior positions, said Asha Sharma, manager (corporate HR) of Everest Industries.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The company, however, uses pre-recorded video interviews — where the computer asks questions — to hire juniors from campuses, she said.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/economic-times-anjali-venugopalan-june-4-2019-banking-on-artificial-intelligence'&gt;https://cis-india.org/internet-governance/news/economic-times-anjali-venugopalan-june-4-2019-banking-on-artificial-intelligence&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Anjali Venugopalan</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-07-02T05:38:26Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/artificial-intelligence-a-full-spectrum-regulatory-challenge-working-draft">
    <title>Artificial Intelligence: a Full-Spectrum Regulatory Challenge [Working Draft]</title>
    <link>https://cis-india.org/internet-governance/artificial-intelligence-a-full-spectrum-regulatory-challenge-working-draft</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
&lt;p&gt;Today, there are certain misconceptions regarding the regulation of AI. Some corporations would like us to believe that AI is being developed and used in a regulatory vacuum. Others in civil society organisations believe that AI is a regulatory circumvention strategy deployed by corporations. As a result, these organisations call for onerous regulations targeting corporations. However, some uses of AI by corporations can be completely benign and some uses of AI by the state can result in the most egregious human rights violations. Therefore policy makers need to throw every regulatory tool from their arsenal to unlock the benefits of AI and mitigate its harms.&lt;/p&gt;
&lt;p&gt;This policy brief proposes a granular, full spectrum approach to the regulation of AI depending on who is using AI, who is impacted by that use and what human rights are impacted. Everything from deregulation, to forbearance, to updated regulations, to absolute and blanket prohibitions needs to be considered depending on the specifics. This approach stands in contrast to approaches of ethics, omnibus law, homogeneous principles, and human rights, which will result in inappropriate under-regulation or over-regulation of the sector.&lt;/p&gt;
&lt;p&gt;Find a copy of the working draft &lt;a href="https://cis-india.org/internet-governance/artificial-intelligence-a-full-spectrum-regulatory-challenge-working-draft-pdf" class="internal-link" title="Artificial Intelligence: A Full-Spectrum Regulatory Challenge (Working Draft) PDF"&gt;here&lt;/a&gt;.&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/artificial-intelligence-a-full-spectrum-regulatory-challenge-working-draft'&gt;https://cis-india.org/internet-governance/artificial-intelligence-a-full-spectrum-regulatory-challenge-working-draft&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>sunil</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Regulatory Practices Lab</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2020-08-04T06:10:13Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium">
    <title>Artificial Intelligence in India: A Compendium</title>
    <link>https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium</link>
    <description>
        &lt;b&gt;Artificial Intelligence (AI) is fast emerging as a key technological paradigm in different sectors across the globe including India.&lt;/b&gt;
        
&lt;p style="text-align: justify;"&gt;Towards understanding the state of AI in India, challenges to the development and adoption of the same, and ethical concerns that arise out of the use of AI - CIS is undertaking research to understand and document&amp;nbsp; national developments, discourse, and impact (actual and potential) to ethical and regulatory solutions and compare the same against global developments in the space. As part of this, CIS is creating a compendium of reports that dive into the use of AI across sectors including&amp;nbsp;healthcare, manufacturing, governance, and finance.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Each report seeks to map the present state of AI in the respective sector. In doing so, it explores:&amp;nbsp;&amp;nbsp;&lt;strong&gt;Use&lt;/strong&gt;: What is the present use of AI in the sector? What is the narrative and discourse around AI in the sector?&amp;nbsp;&lt;strong&gt;Actors&lt;/strong&gt;: Who are the key stakeholders involved in the development, implementation​ ​and​ ​regulation​ ​of​ ​AI​ ​in​ ​the sector?&amp;nbsp;&lt;strong&gt; Impact: &lt;/strong&gt;What is the potential and existing impact of AI in the sector?&amp;nbsp;&amp;nbsp;&lt;strong&gt;Regulation&lt;/strong&gt;: What are the challenges faced in policy making around AI in the sector?&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;The reports are as follows:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;
&lt;div&gt;&lt;a href="https://cis-india.org/internet-governance/ai-and-healthcare-report" class="internal-link" title="AI and Healthcare Report"&gt;AI and the Healthcare Industry in India&lt;/a&gt;&lt;/div&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;div&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/AIManufacturingandServices_Report_02.pdf"&gt;AI and the Manufacturing and Services Sector in India&lt;/a&gt;&lt;/div&gt;
&lt;/li&gt;
&lt;li&gt;&lt;a href="https://cis-india.org/internet-governance/files/ai-in-banking-and-finance" class="internal-link" title="AI in Banking and Finance"&gt;AI and the Banking and Finance Industry in India&lt;/a&gt;: (19th June 2018 Update: This case study has been modified to remove interview quotes, which are in the process of being confirmed. The link above is the latest draft of the report.)&lt;/li&gt;&lt;li&gt;&lt;a href="https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf" class="internal-link" title="AI and Governance Case Study pdf"&gt;AI in the Governance Sector in India&lt;br /&gt;&lt;/a&gt;&lt;/li&gt;&lt;/ul&gt;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;
&lt;hr /&gt;
The research is funded by Google India. Comments and feedback are welcome. The reports are drafts.

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium'&gt;https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Centre for Internet &amp; Society</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2023-05-09T06:56:25Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi">
    <title>Artificial Intelligence in Governance: A Report of the Roundtable held in New Delhi</title>
    <link>https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi</link>
    <description>
        &lt;b&gt;This Report provides an overview of the proceedings of the Roundtable on Artificial Intelligence (AI) in Governance, conducted at the Indian Islamic Cultural Centre, in New Delhi on March 16, 2018. The main purpose of the Roundtable was to discuss the deployment and implementation of AI in various aspects of governance within the Indian context. This report summarises the discussions on the development and implementation of AI in various aspects of governance in India. The event was attended by participants from academia, civil society, the legal sector, the finance sector, and the government.&lt;/b&gt;
        &lt;p&gt;&lt;span&gt;Event Report: &lt;/span&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/files/ai-in-governance"&gt;Download&lt;/a&gt;&lt;span&gt; (PDF)&lt;/span&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;This report provides a summary of the proceedings of the Roundtable on Artificial Intelligence (AI) in Governance (hereinafter referred to as ‘the Roundtable’). The Roundtable took place at the India Islamic Cultural Centre in New Delhi on March 16, 2018 and included participation  from academia, civil society, law, finance, and government. The main purpose of the Roundtable was to discuss the deployment and implementation of AI in various aspects of governance within the Indian context.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The Roundtable began with a presentation by Amber Sinha (Centre for Internet and Society - CIS) providing an overview of the CIS’s research objectives and findings thus far. During this presentation, he defined both AI and the scope of CIS’s research, outlining the areas of law enforcement, defense, education, judicial decision making, and the discharging of administrative functions as the main areas of concerns for the study. The presentation then outlined the key AI deployments and implementations that have been identified by the research in each of these areas. Lastly, the presentation raised some of the ethical and legal concerns related to this phenomenon.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;The presentation was followed by the Roundtable discussion that saw various topics in regards to the usages, challenges, ethical considerations and implications of AI in the sector being discussed. This report has identified a number of key themes of importance evident throughout these discussions. These themes include: (1) the meaning and scope of AI, (2) AI’s sectoral applications, (3) human involvement with automated decision making, (4) social and power relations surrounding AI, (5) regulatory approaches to AI and, (6) challenges to adopting AI. These themes in relation to the Roundtable are explored further below.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Meaning and Scope of AI&lt;/span&gt;&lt;/h3&gt;
&lt;p&gt;&lt;span id="docs-internal-guid-7edcf822-2698-f1fd-35d3-0bcc913c986a"&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;&lt;span&gt;One of the first tasks recommended by the group of participants was to define the meaning and scope of AI and the way those terms are used and adopted today. These concerns included the need to establish a distinction between the use of algorithms, machine learning, automation and artificial intelligence. Several participants believed that establishing consensus around these terms was essential before proceeding towards a stage of developing regulatory frameworks around them.&lt;/span&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;&lt;span&gt;The general fact agreed to was that AI as we understand it does not necessarily extend to complete independence in terms of automated decision making but it refers instead to the varying levels of machine learning (ML), and the automation of certain processes that has already been achieved. Several concerns that emerged during the course of the discussion centred around the question of autonomy and transparency in the process of ML and algorithmic processing. Stakeholders recommended that over and above the debates of humans in the loop [1] on the loop [2] and out of the loop, [3] there were several other gaps with respect to AI and its usage in the industry today which also need to be considered before building a roadmap for future usage. Key issues like information asymmetries, communication lags, a lack of transparency, the increased mystification of the coding process and the centralization of power all needed to be examined and analysed under the rubric of developing regulatory frameworks.&lt;/span&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: The group brought out the need for standardization of terminology as well as the establishment of globally replicable standards surrounding the usage, control and proliferation of AI. The discussion also brought up the problems with universal applicability of norms. One of the participants brought up an issue regarding the lack of normative frameworks around the usage and proliferation of AI. Another participant responded to the concern by alluding to the Asilomar AI principles.[4] The Asilomar AI principles are a set of 23 principles aimed at directing and shaping AI research in the future. The discussion brought out further issues regarding the enforceability as well as the universal applicability of the principles and their global relevance as well. Participants recommended the development of a shorter, more universally applicable regulatory framework that could address various contextual limitations as well.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;AI Sectoral Applications&lt;/span&gt;&lt;/h3&gt;
&lt;p&gt;&lt;span&gt;Participants mentioned a number of both current and potential applications of AI technologies, referencing the defence sector, the financial sector, and the agriculture sector. There are several developments taking place on the Indian military front with the Committee on AI and National Security being established by the Ministry of Defence. Through the course of the discussion it was also stated that the Indian Armed Forces were very interested in the possibilities of using AI for their own strategic and tactical purposes. From a technological standpoint, however, there has been limited progress in India in researching and developing AI. &lt;/span&gt;&lt;/p&gt;
&lt;p&gt;&lt;span&gt;While India does deploy some Unmanned Aerial Vehicles (UAVs), they are mostly bought from Israel, and often are not autonomous. It was also pointed out that contrary to reportage in the media, the defence establishment in India is extremely cautious about the adoption of autonomous weapons systems, and that the autonomous technology being rolled out by the CAIR is not yet considered trustworthy enough for deployment.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Discussions further revealed that the few technologies that have a relative degree of autonomy are primarily loitering munitions and are used to target radar installations for reconnaissance purposes. One participant mentioned that while most militaries are interested in deploying AI, it is primarily from an Intelligence, Surveillance and Reconnaissance (ISR) perspective. The only exception to this generalization is China where the military ethos and command structure would work better with increased reliance on independent AI systems. One major AI system rolled out by the US is Project Maven which is primarily an ISR system. The aim of using these systems is to improve decision making and enhance data analysis particularly since battlefields generate a lot of data that isn’t used anywhere.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Another sector discussed was the securities market where algorithms were used from an analytical and data collection perspective. A participant referred to the fact that machine learning was being used for processes like credit and trade scoring -- all with humans on the loop. The participant further suggested that while trade scoring was increasingly automated, the overall predictive nature of such technologies remained within a self limiting capacity wherein statistical models, collected data and pattern analysis were used to predict future trends. The participant questioned whether these algorithms could be considered as AI in the truest sense of the term since they primarily performed statistical functions and data analysis.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;One participant also recommended the application of AI to sectors like agriculture with the intention of gradually acclimatizing users to the technology itself. Respondents also stated that while AI technologies were being used in the agricultural space it was primarily from the standpoint of data collection and analysis as opposed to predictive methods. It was mentioned that a challenge to the broad adoption of AI in this sector is the core problem of adopting AI as a methodology – namely information asymmetries, excessive data collection, limited control/centralization and the obfuscatory nature of code – would not be addressed/modified. Lastly, participants also suggested that within the Indian framework not much was being done aside from addressing farmers’ queries and analysing the data from those concerns.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: The discussion drew attention to the various sectors where AI was currently being used -- such as the military space, agricultural development and the securities market -- as well as potential spaces of application -- such as healthcare and manual scavenging. The key challenges that emerged were information asymmetries with respect to the usage of these technologies as well as limited capacity in terms of technological advancement.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Human Involvement with Automated Decision Making&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Large parts of discussions throughout the Roundtable event were preoccupied with automated decision making and specifically, the involvement of humans (human on and in the loop) or lack thereof (human out of the loop) in this process. These discussions often took place with considerations of AI for prescriptive and descriptive uses.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Participants expressed that human involvement was not needed when AI was being used for descriptive uses, such as determining relationships between various variables in large data sets. Many agreed to the superior ability of ML and similar AI technologies in describing large and unorganized datasets. It was the prescriptive uses of AI where participants saw the need for human involvement, with many questioning the technology making more important decisions by itself.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;The need for human involvement in automated decision making was further justified by references to various instances of algorithmic bias in the American context. One participant, for example, brought up the use of algorithmic decision making by a school board in the United States for human resource practices (hirings, firing, etc.) based on the standardized test scores of students. In this instance, such practices resulted in the termination of teachers primarily from low income neighbourhoods.[5] The main challenge participants identified in regards to human on the loop automated decision making is the issue of capacity, as significant training would have to be achieved for sectors to have employees actively involved in the automated decision making workflow.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;An example in the context of the healthcare field was brought up by one participant arguing for human in the loop in regards to prescriptive scenarios. The participant suggested that AI technology, when given x-ray or MRI data for example, should only be limited to pointing out the correlations of diseases with patients’ scans/x-rays. Analysis of such correlations should be reserved for the medical expertise of doctors who would then determine if any instances of causality can be identified from this data and if it’s appropriate for diagnosing patients.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;It was emphasized that, despite a preference for human on/in the loop in regards to automated decision making, there is a need to be cognisant of techno-solutionism due to the human tendency of over reliance on technology when making decisions. A need for command and control structures and protocols was emphasized for various governance sectors in order to avoid potentially disastrous results through a checks and balances system. It was noted that the defense sector has already developed such protocols, having established a chain of command due to its long history of algorithmic decision making (e.g. the Aegis Combat System being used by the US Navy in the 1980s).&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;One key reason why militaries prefer human in and on the loop systems as opposed to out of the loop systems is because of the protocol associated with human action on the battlefield. International Humanitarian Law has clear indicators of what constitutes a war crime and who is to be held responsible in the scenario but developing such a framework with AI systems would be challenging as it would be difficult to determine which party ought to be held accountable in the case of a transgression or a mistake.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: It was reiterated by many participants that neither AI technology nor India’s regulatory framework is at a point where AI can be trusted to make significant decisions alone -- especially when such decisions are evaluating humans directly. It was recommended that human out of the loop decision making should be reserved for descriptive practices whereas human on and in the loop decision making should be used for prescriptive practices. Lastly, it was also suggested that appropriate protocols be put in place to direct those involved in the automated decision making workflow. Particularly when the process involves judgements and complex decision making in sectors such as jurisprudence and the military.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;The Social and Power Relations Surrounding AI&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Some participants emphasized the need to contextualize discussions of AI and governance within larger themes of poverty, global capital and power/social relations. Their concerns were that the use of AI technologies would only create and reinforce existing power structures and should instead be utilized towards ameliorating such issues. Manual scavenging, for example, was identified as an area where AI could be used to good effect if coupled with larger socio-political policy changes. There are several hierarchies that could potentially be reinforced through this process and all these failings needed to be examined thoroughly before such a system was adopted and incorporated within the real world.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Furthermore the discussion also revealed that the objectivity attributed to AI and ML tends to gloss over the fact that there are nonetheless implicit biases that exist in the minds of the creators that might work themselves into the code. Fears regarding technology recreating a more exclusionary system were not entirely unfounded as participants pointed out the fact that the knowledge base of the user would determine whether technology was used as a tool of centralization or democratization.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One participant also questioned the concept of governance itself, contrasting the Indian government’s usage of the term in the 1950s (as it appears in the Directive Principle) with that of the World Bank in the 1990s.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: Discussions of the implementation and deployment of AI within the governance landscape should attempt to take into consideration larger power relations and concepts of equity.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Regulatory Approaches to AI&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Many recognized the need for AI-specific regulations across Indian sectors, including governance. These regulations, participants stated, should draw from notions of accountability, algorithmic transparency and efficiency. Furthermore, it was also stated that such regulations should consider the variations across the different legs of the governance sector, especially in regards to defence. One participant, pointing to the larger trends towards automation, recommended the establishment of certain fundamental guidelines aimed at directing the applicability of AI in general. The participant drew attention to the need for a robust evaluation system for various sectors (the criminal justice system, the securities market, etc.) as a way of providing checks on algorithmic biases. Another emphasized the need for regulations for better quality data so as to ensure machine readability and processability for various AI systems.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Another key point that emerged was the importance of examining how specific algorithms performed processes like identification or detection. A participant recommended the need to examine the ways in which machines identify humans and what categories/biases could infiltrate machine-judgement. They reiterated that if a new element was introduced in the system, the pre-existing variables would be impacted as well. The participant further recommended that it would be useful to look at these systems in terms of the couplings that get created in order to determine what kinds of relations are fostered within that system.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;The roundtable saw some debate regarding the most appropriate approach to developing such regulations. Some participants argued for a harms-based approach, particularly in regards to determining if regulations are needed all together for specific sectors (as opposed to guidelines, best practices, etc.). The need to be cognisant of both individual and structural harms was emphasized, mindful of the possibility of algorithmic biases affecting traditionally marginalized groups.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Others only saw value in a harms based approach insomuch that it could help outline the appropriate penalties in an event of regulations being violated, arguing instead for a rights-based approach as it enabled greater room for technological changes. An approach that kept in mind emerging AI technologies was reiterated by a number of participants as being crucial to any regulatory framework. The need for a regulatory space that allowed for technological experimentation without the fear of constitutional violation was also communicated.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: The need for an AI-specific regulatory framework cognisant of differentiations across sectors in India was emphasized. There is some debate about the most appropriate approach for such a framework, a harms-based approach being identified by many as providing the best perspective on regulatory need and penalties. Some identified the rights-based approach as providing the most flexibility for a rapidly evolving technological landscape.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Challenges to Adopting AI&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Out of all the concerns regarding the adoption of algorithms, ML and AI, the two key points of resistance that emerged, centred around issues of accountability and transparency. Participants suggested that within an AI system, predictability would be a key concern, and in the absence of predictable outcomes, establishing redressal mechanisms would pose key challenges as well.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p id="_mcePaste"&gt;A discussion was also initiated regarding the problems involved in attributing responsibility within the AI chain as well as the need to demystify the process of using AI in daily life. While reiterating the current landscape, participants spoke about how the usage of AI is currently limited to the automation of certain tasks and processes in certain sectors where algorithmic processing is primarily used as a tool of data collection and analysis as opposed to an independent decision making tool.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste"&gt;One of the suggestions and thought points that emerged during the discussion was whether a gradual adoption of AI on a sectoral basis might be more beneficial as it would provide breathing room in the middle to test the system and establish trust between the developers, providers, and consumers. This prompted a debate about the controllers and the consumers of AI and how the gap between the two would need to be negotiated. The debate also brought up larger concerns regarding the mystification of AI as a process itself and the complications of translating the code into communicable points of intervention.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste"&gt;Another major issue that emerged was the question of attribution of responsibility in the case of mistakes. In the legal process as it currently exists, human imperfections notwithstanding, it would be possible to attribute the blame for decisions taken to certain actants undertaking the action. Similarly in the defence sector, it would be possible to trace the chain of command and identify key points of failure, but in the case of AI based judgements, it would be difficult to place responsibility or blame. This observation led to a debate regarding accountability in the AI chain. It was inconclusive whether the error should be attributed to the developer, the distributor or the consumer.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste" style="text-align: justify; "&gt;A suggestion that was offered in order to counter the information asymmetry as well as reduce the mystification of computational method was to make the algorithm and its processes transparent. This sparked a debate, however, as participants stated that while such a state of transparency ought to be sought after and aspired towards, it would be accompanied by certain threats to the system. A key challenge that was pointed out was the fact that if the algorithm was made transparent, and its details were shared, there would be several ways to manipulate it, translate it and misuse it.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste" style="text-align: justify; "&gt;Another question that emerged was the distribution of AI technologies and the centralization of the proliferation process particularly in terms of service provision. One participant suggested that given the limited nature of research being undertaken and the paucity of resources, a limited number of companies would end up holding the best tech, the best resources and the best people. They further suggested that these technologies might end up being rolled out as a service on a contractual basis. In which case it would be important to track how the service was being controlled and delivered. Models of transference would become central points of negotiation with alternations between procurement based, lease based, and ownership based models of service delivery. Participants suggested that this was going to be a key factor in determining how to approach these issues from a legal and policy standpoint.&lt;/p&gt;
&lt;div&gt;&lt;/div&gt;
&lt;p style="text-align: justify; "&gt;A discussion was also initiated regarding the problems involved in attributing responsibility within the AI chain as well as the need to demystify the process of using AI in daily life. While reiterating the current landscape, participants spoke about how the usage of AI is currently limited to the automation of certain tasks and processes in certain sectors where algorithmic processing is primarily used as a tool of data collection and analysis as opposed to an independent decision making tool.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One of the suggestions and thought points that emerged during the discussion was whether a gradual adoption of AI on a sectoral basis might be more beneficial as it would provide breathing room in the middle to test the system and establish trust between the developers, providers, and consumers. This prompted a debate about the controllers and the consumers of AI and how the gap between the two would need to be negotiated. The debate also brought up larger concerns regarding the mystification of AI as a process itself and the complications of translating the code into communicable points of intervention.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another major issue that emerged was the question of attribution of responsibility in the case of mistakes. In the legal process as it currently exists, human imperfections notwithstanding, it would be possible to attribute the blame for decisions taken to certain actants undertaking the action. Similarly in the defence sector, it would be possible to trace the chain of command and identify key points of failure, but in the case of AI based judgements, it would be difficult to place responsibility or blame. This observation led to a debate regarding accountability in the AI chain. It was inconclusive whether the error should be attributed to the developer, the distributor or the consumer.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A suggestion that was offered in order to counter the information asymmetry as well as reduce the mystification of computational method was to make the algorithm and its processes transparent. This sparked a debate, however, as participants stated that while such a state of transparency ought to be sought after and aspired towards, it would be accompanied by certain threats to the system. A key challenge that was pointed out was the fact that if the algorithm was made transparent, and its details were shared, there would be several ways to manipulate it, translate it and misuse it.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another question that emerged was the distribution of AI technologies and the centralization of the proliferation process particularly in terms of service provision. One participant suggested that given the limited nature of research being undertaken and the paucity of resources, a limited number of companies would end up holding the best tech, the best resources and the best people. They further suggested that these technologies might end up being rolled out as a service on a contractual basis. In which case it would be important to track how the service was being controlled and delivered. Models of transference would become central points of negotiation with alternations between procurement based, lease based, and ownership based models of service delivery. Participants suggested that this was going to be a key factor in determining how to approach these issues from a legal and policy standpoint.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Takeaway Point: The two key points of resistance that emerged during the course of discussion were accountability and transparency. Participants pointed out the various challenges involved in attributing blame within the AI chain and they also spoke about the complexities of opening up AI code, thereby leaving it vulnerable to manipulation. Certain other challenges that were briefly touched upon were the information asymmetry, excessive data collection, centralization of power in the hands of the controllers and complicated service distribution models.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Conclusion&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The Roundtable provided some insight into larger debates regarding the deployment and applications of AI in the governance sector of India. The need for a regulatory framework as well as globally replicable standards surrounding AI was emphasized, particularly one mindful of the particular needs of differing fields of the governance sector (especially defence). Furthermore, a need for human on/in the loop practices with regards to automated decision making was highlighted for prescriptive instances, particularly when such decisions are responsible for directly evaluating humans. Contextualising AI within its sociopolitical parameters was another key recommendation as it would help filter out the biases that might work themselves into the code and affect the performance of the algorithm. Further, it is necessary to see the involvement and influence of the private sector in the deployment of AI for governance, it often translating into the delivery of technological services from private actors to public bodies towards discharge of public functions. This has clear implications for requirements of transparency  and procedural fairness even in private sector delivery of these services. Defining the meaning and scope of AI while working to demystify algorithms themselves would serve to strengthen regulatory frameworks as well as make AI more accessible for the user / consumer.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;[1]. Automated decision making model where final decisions are made by a human operator&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[2]. Automated decision making model where decisions can be made without human involvement but a human can override the system.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[3]. A completely autonomous decision making model requiring no human involvement&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[4]. https://futureoflife.org/ai-principles/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[5]. The participant was drawing this example from Cathy O’Neil’s Weapons of Math Destruction, (Penguin,2016), at 4-13.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi'&gt;https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Saman Goudarzi and Natallia Khaniejo</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-03T15:49:40Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>




</rdf:RDF>
